idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
17,200
def validation_statuses(self, area_uuid):
    """Return validation statuses for all files in the given upload area."""
    path = "/area/{uuid}/validations".format(uuid=area_uuid)
    response = self._make_request('get', path)
    return response.json()
Get count of validation statuses for all files in upload_area
17,201
def language_name(self, text: str) -> str:
    """Predict the programming language name of the given source code."""
    features = extract(text)
    input_fn = _to_func(([features], []))
    pos: int = next(self._classifier.predict_classes(input_fn=input_fn))
    LOGGER.debug("Predicted language position %s", pos)
    # Languages are sorted so the classifier's class index maps to a name.
    return sorted(self.languages)[pos]
Predict the programming language name of the given source code .
17,202
def scores(self, text: str) -> Dict[str, float]:
    """Return a probability score between 0.0 and 1.0 for each language."""
    features = extract(text)
    input_fn = _to_func(([features], []))
    probabilities = next(self._classifier.predict_proba(input_fn=input_fn)).tolist()
    # Sorted language names line up with the classifier's class order.
    return dict(zip(sorted(self.languages), probabilities))
A score for each language corresponding to the probability that the text is written in the given language . The score is a float value between 0 . 0 and 1 . 0
17,203
def probable_languages(self, text: str, max_languages: int = 3) -> Tuple[str, ...]:
    """List languages from most to least probable, cut at the widest gap."""
    scores = self.scores(text)
    ranked = sorted(scores.items(), key=itemgetter(1), reverse=True)
    languages, probabilities = list(zip(*ranked))
    # Compare gaps between successive probabilities on a log scale.
    rescaled = [log(proba) for proba in probabilities]
    distances = [rescaled[pos] - rescaled[pos + 1]
                 for pos in range(len(rescaled) - 1)]
    # Keep everything before the single largest drop, bounded by max_languages.
    max_distance_pos = max(enumerate(distances, 1), key=itemgetter(1))[0]
    limit = min(max_distance_pos, max_languages)
    return languages[:limit]
List of most probable programming languages the list is ordered from the most probable to the least probable one .
17,204
def learn(self, input_dir: str) -> float:
    """Learn language features from source files; return the last accuracy."""
    if self.is_default:
        LOGGER.error("Cannot learn using default model")
        raise GuesslangError('Cannot learn using default "readonly" model')
    languages = self.languages
    LOGGER.info("Extract training data")
    extensions = [ext for exts in languages.values() for ext in exts]
    files = search_files(input_dir, extensions)
    nb_files = len(files)
    # First chunk is held out for evaluation; the rest feed training steps.
    chunk_size = min(int(CHUNK_PROPORTION * nb_files), CHUNK_SIZE)
    LOGGER.debug("Evaluation files count: %d", chunk_size)
    LOGGER.debug("Training files count: %d", nb_files - chunk_size)
    batches = _pop_many(files, chunk_size)
    LOGGER.debug("Prepare evaluation data")
    evaluation_data = extract_from_files(next(batches), languages)
    LOGGER.debug("Evaluation data count: %d", len(evaluation_data[0]))
    accuracy = 0
    total = ceil(nb_files / chunk_size) - 1
    LOGGER.info("Start learning")
    for pos, training_files in enumerate(batches, 1):
        LOGGER.info("Step %.2f%%", 100 * pos / total)
        LOGGER.debug("Training data extraction")
        training_data = extract_from_files(training_files, languages)
        LOGGER.debug("Training data count: %d", len(training_data[0]))
        steps = int(FITTING_FACTOR * len(training_data[0]) / 100)
        LOGGER.debug("Fitting, steps count: %d", steps)
        self._classifier.fit(input_fn=_to_func(training_data), steps=steps)
        LOGGER.debug("Evaluation")
        accuracy = self._classifier.evaluate(
            input_fn=_to_func(evaluation_data), steps=1)['accuracy']
        _comment(accuracy)
    return accuracy
Learn languages features from source files .
17,205
def main():
    """Report graph creator command line."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        'reportfile', type=argparse.FileType('r'),
        help="test report file generated by `guesslang --test TESTDIR`")
    parser.add_argument(
        '-d', '--debug', default=False, action='store_true',
        help="show debug messages")
    args = parser.parse_args()
    config_logging(args.debug)
    report = json.load(args.reportfile)
    graph_data = _build_graph(report)
    index_path = _prepare_resources(graph_data)
    webbrowser.open(str(index_path))
Report graph creator command line
17,206
def search_files(source: str, extensions: List[str]) -> List[Path]:
    """Return a shuffled list of files under source matching the extensions."""
    files = [
        path for path in Path(source).glob('**/*')
        if path.is_file() and path.suffix.lstrip('.') in extensions]
    nb_files = len(files)
    LOGGER.debug("Total files found: %d", nb_files)
    if nb_files < NB_FILES_MIN:
        LOGGER.error("Too few source files")
        raise GuesslangError(
            '{} source files found in {}. {} files minimum is required'.format(
                nb_files, source, NB_FILES_MIN))
    # Shuffle so training/evaluation chunks are not biased by directory order.
    random.shuffle(files)
    return files
Retrieve files located in the source directory and its subdirectories whose extensions match one of the listed extensions.
17,207
def extract_from_files(
        files: List[Path], languages: Dict[str, List[str]]) -> DataSet:
    """Extract arrays of features from the given files."""
    # Map each extension to the rank of its language in sorted order.
    rank_map = {
        ext: rank
        for rank, (_, exts) in enumerate(sorted(languages.items()))
        for ext in exts}
    with multiprocessing.Pool(initializer=_process_init) as pool:
        file_iterator = ((path, rank_map) for path in files)
        arrays = _to_arrays(pool.starmap(_extract_features, file_iterator))
    LOGGER.debug("Extracted arrays count: %d", len(arrays[0]))
    return arrays
Extract arrays of features from the given files .
17,208
def safe_read_file(file_path: Path) -> str:
    """Read a text file, trying several encodings until one decodes."""
    for encoding in FILE_ENCODINGS:
        try:
            return file_path.read_text(encoding=encoding)
        except UnicodeError:
            continue  # try the next encoding
    raise GuesslangError('Encoding not supported for {!s}'.format(file_path))
Read a text file . Several text encodings are tried until the file content is correctly decoded .
17,209
def config_logging(debug: bool = False) -> None:
    """Set up application and TensorFlow logging."""
    if debug:
        level, tf_level = 'DEBUG', tf.logging.INFO
    else:
        level, tf_level = 'INFO', tf.logging.ERROR
    logging_config = config_dict('logging.json')
    for logger in logging_config['loggers'].values():
        logger['level'] = level
    logging.config.dictConfig(logging_config)
    tf.logging.set_verbosity(tf_level)
Set - up application and tensorflow logging .
17,210
def config_dict(name: str) -> Dict[str, Any]:
    """Load a JSON configuration dict from the Guesslang config directory."""
    try:
        content = resource_string(PACKAGE, DATADIR.format(name)).decode()
    except DistributionNotFound as error:
        # Fall back to the source tree when not installed as a package.
        LOGGER.warning("Cannot load %s from packages: %s", name, error)
        content = DATA_FALLBACK.joinpath(name).read_text()
    return cast(Dict[str, Any], json.loads(content))
Load a JSON configuration dict from Guesslang config directory .
17,211
def model_info(model_dir: Optional[str] = None) -> Tuple[str, bool]:
    """Return the model directory name and whether it is the default model."""
    if model_dir is None:
        try:
            model_dir = resource_filename(PACKAGE, DATADIR.format('model'))
        except DistributionNotFound as error:
            LOGGER.warning("Cannot load model from packages: %s", error)
            model_dir = str(DATA_FALLBACK.joinpath('model').absolute())
        is_default_model = True
    else:
        is_default_model = False
    model_path = Path(model_dir)
    model_path.mkdir(exist_ok=True)
    LOGGER.debug("Using model: %s, default: %s", model_path, is_default_model)
    return (model_dir, is_default_model)
Retrieve Guesslang model directory name and tells if it is the default model .
17,212
def format(self, record: logging.LogRecord) -> str:
    """Format log records to produce colored messages."""
    if platform.system() != 'Linux':
        # Color escape codes are only emitted on Linux terminals.
        return super().format(record)
    record.msg = self.STYLE[record.levelname] + record.msg + self.STYLE['END']
    record.levelname = (
        self.STYLE['LEVEL'] + record.levelname + self.STYLE['END'])
    return super().format(record)
Format log records to produce colored messages .
17,213
def main():
    """Github repositories downloader command line."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        'githubtoken',
        help="Github OAuth token, see https://developer.github.com/v3/oauth/")
    parser.add_argument('destination', help="location of the downloaded repos")
    parser.add_argument(
        '-n', '--nbrepo', help="number of repositories per language",
        type=int, default=1000)
    parser.add_argument(
        '-d', '--debug', default=False, action='store_true',
        help="show debug messages")
    args = parser.parse_args()
    config_logging(args.debug)
    destination = Path(args.destination)
    nb_repos = args.nbrepo
    token = args.githubtoken
    languages = config_dict('languages.json')
    destination.mkdir(exist_ok=True)
    for pos, language in enumerate(sorted(languages), 1):
        LOGGER.info("Step %.2f%%, %s", 100 * pos / len(languages), language)
        LOGGER.info("Fetch %d repos infos for language %s", nb_repos, language)
        repos = _retrieve_repo_details(language, nb_repos, token)
        LOGGER.info("%d repos details kept. Downloading", len(repos))
        _download_repos(language, repos, destination)
        LOGGER.info("Language %s repos downloaded", language)
    LOGGER.debug("Exit OK")
Github repositories downloader command line
17,214
def retry(default=None):
    """Decorator: retry the wrapped function after connection failures."""
    def decorator(func):
        @functools.wraps(func)
        def _wrapper(*args, **kw):
            for pos in range(1, MAX_RETRIES):
                try:
                    return func(*args, **kw)
                except (RuntimeError, requests.ConnectionError) as error:
                    LOGGER.warning("Failed: %s, %s", type(error), error)
                    # Back off longer after each successive failure.
                    for _ in range(pos):
                        _rest()
            LOGGER.warning("Request Aborted")
            return default
        return _wrapper
    return decorator
Retry functions after failures
17,215
def main():
    """Keywords generator command line."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('learn', help="learning source codes directory")
    parser.add_argument('keywords', help="output keywords file, JSON")
    parser.add_argument(
        '-n', '--nbkeywords', type=int, default=10000,
        help="the number of keywords to keep")
    parser.add_argument(
        '-d', '--debug', default=False, action='store_true',
        help="show debug messages")
    args = parser.parse_args()
    config_logging(args.debug)
    learn_path = Path(args.learn)
    keywords_path = Path(args.keywords)
    nb_keywords = args.nbkeywords
    languages = config_dict('languages.json')
    exts = {ext: lang for lang, exts in languages.items() for ext in exts}
    term_count = Counter()
    document_count = Counter()
    pos = 0
    LOGGER.info("Reading files form %s", learn_path)
    for pos, path in enumerate(Path(learn_path).glob('**/*'), 1):
        if pos % STEP == 0:
            LOGGER.debug("Processed %d", pos)
            gc.collect()
        if not path.is_file() or not exts.get(path.suffix.lstrip('.')):
            continue
        counter = _extract(path)
        term_count.update(counter)
        document_count.update(counter.keys())
    nb_terms = sum(term_count.values())
    nb_documents = pos - 1
    if not nb_documents:
        LOGGER.error("No source files found in %s", learn_path)
        raise RuntimeError('No source files in {}'.format(learn_path))
    LOGGER.info("%d unique terms found", len(term_count))
    terms = _most_frequent(
        (term_count, nb_terms), (document_count, nb_documents), nb_keywords)
    # Keywords are stored as stable integer hashes of the tokens.
    keywords = {
        token: int(hashlib.sha1(token.encode()).hexdigest(), 16)
        for token in terms}
    with keywords_path.open('w') as keywords_file:
        json.dump(keywords, keywords_file, indent=2, sort_keys=True)
    LOGGER.info("%d keywords written into %s", len(keywords), keywords_path)
    LOGGER.debug("Exit OK")
Keywords generator command line
17,216
def main() -> None:
    """Run the command line."""
    try:
        _real_main()
    except GuesslangError as error:
        LOGGER.critical("Failed: %s", error)
        sys.exit(-1)
    except KeyboardInterrupt:
        LOGGER.critical("Cancelled!")
        sys.exit(-2)
Run command line
17,217
def split(text: str) -> List[str]:
    """Split a text into a list of non-blank tokens."""
    return [word for word in SEPARATOR.split(text) if word.strip(' \t')]
Split a text into a list of tokens .
17,218
def main():
    """Files extractor command line."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('source', help="location of the downloaded repos")
    parser.add_argument('destination', help="location of the extracted files")
    parser.add_argument(
        '-t', '--nb-test-files', help="number of testing files per language",
        type=int, default=5000)
    parser.add_argument(
        '-l', '--nb-learn-files', help="number of learning files per language",
        type=int, default=10000)
    parser.add_argument(
        '-r', '--remove', help="remove repos that cannot be read",
        action='store_true', default=False)
    parser.add_argument(
        '-d', '--debug', default=False, action='store_true',
        help="show debug messages")
    args = parser.parse_args()
    config_logging(args.debug)
    source = Path(args.source)
    destination = Path(args.destination)
    nb_test = args.nb_test_files
    nb_learn = args.nb_learn_files
    remove = args.remove
    repos = _find_repos(source)
    split_repos = _split_repos(repos, nb_test, nb_learn)
    split_files = _find_files(*split_repos, nb_test, nb_learn, remove)
    _unzip_all(*split_files, destination)
    LOGGER.info("Files saved into %s", destination)
    LOGGER.debug("Exit OK")
Files extractor command line
17,219
def combine_slices(slice_datasets, rescale=None):
    """Stitch a DICOM series into a 3-D voxel array plus an ijk->xyz affine.

    Returns a (voxels, transform) pair where transform is a 4x4 matrix
    mapping pixel indices to DICOM patient coordinates.
    """
    if not slice_datasets:
        raise DicomImportException("Must provide at least one DICOM dataset")
    _validate_slices_form_uniform_grid(slice_datasets)
    voxels = _merge_slice_pixel_arrays(slice_datasets, rescale)
    transform = _ijk_to_patient_xyz_transform_matrix(slice_datasets)
    return voxels, transform
Given a list of pydicom datasets for an image series stitch them together into a three - dimensional numpy array . Also calculate a 4x4 affine transformation matrix that converts the ijk - pixel - indices into the xyz - coordinates in the DICOM patient s coordinate system .
17,220
def _validate_slices_form_uniform_grid(slice_datasets):
    """Check that the slices form an evenly-spaced, uniform grid of data."""
    # These attributes must be identical across every slice in the series.
    invariant_properties = [
        'Modality',
        'SOPClassUID',
        'SeriesInstanceUID',
        'Rows',
        'Columns',
        'PixelSpacing',
        'PixelRepresentation',
        'BitsAllocated',
        'BitsStored',
        'HighBit',
    ]
    for property_name in invariant_properties:
        _slice_attribute_equal(slice_datasets, property_name)
    _validate_image_orientation(slice_datasets[0].ImageOrientationPatient)
    _slice_ndarray_attribute_almost_equal(
        slice_datasets, 'ImageOrientationPatient', 1e-5)
    slice_positions = _slice_positions(slice_datasets)
    _check_for_missing_slices(slice_positions)
Perform various data checks to ensure that the list of slices form a evenly - spaced grid of data . Some of these checks are probably not required if the data follows the DICOM specification however it seems pertinent to check anyway .
17,221
def parse_url(cls, string):
    """Match string against cls.URL_RE and return the named groups."""
    match = cls.URL_RE.match(string)
    if match is None:
        raise InvalidKeyError(cls, string)
    return match.groupdict()
If it can be parsed as a version_guid with no preceding org + offering, returns a dict with key version_guid and the matched value.
17,222
def offering(self):
    """Deprecated. Use the course and run properties independently."""
    warnings.warn(
        "Offering is no longer a supported property of Locator. Please use the course and run properties.",
        DeprecationWarning,
        stacklevel=2
    )
    if not self.course and not self.run:
        return None
    if self.course and not self.run:
        return self.course
    return "/".join([self.course, self.run])
Deprecated . Use course and run independently .
17,223
def make_usage_key_from_deprecated_string(self, location_url):
    """Deprecated: build a UsageKey from a serialized Location string."""
    warnings.warn(
        "make_usage_key_from_deprecated_string is deprecated! Please use make_usage_key",
        DeprecationWarning,
        stacklevel=2
    )
    return BlockUsageLocator.from_string(location_url).replace(run=self.run)
Deprecated mechanism for creating a UsageKey given a CourseKey and a serialized Location .
17,224
def _from_string(cls, serialized):
    """Deserialize the course part of serialized, then the block part."""
    course_key = CourseLocator._from_string(serialized)
    parsed_parts = cls.parse_url(serialized)
    block_id = parsed_parts.get('block_id', None)
    if block_id is None:
        raise InvalidKeyError(cls, serialized)
    return cls(course_key, parsed_parts.get('block_type'), block_id)
Requests CourseLocator to deserialize its part and then adds the local deserialization of block
17,225
def _parse_block_ref(cls, block_ref, deprecated=False):
    """Parse block_ref into a valid block reference or raise InvalidKeyError."""
    if deprecated and block_ref is None:
        return None
    if isinstance(block_ref, LocalId):
        return block_ref
    is_valid_deprecated = deprecated and cls.DEPRECATED_ALLOWED_ID_RE.match(block_ref)
    is_valid = cls.ALLOWED_ID_RE.match(block_ref)
    if is_valid or is_valid_deprecated:
        return block_ref
    raise InvalidKeyError(cls, block_ref)
Given block_ref tries to parse it into a valid block reference .
17,226
def html_id(self):
    """Return an id usable as the id attribute of an HTML element."""
    if not self.deprecated:
        return self.block_id
    # Deprecated keys join all identifying fields into one sanitized string.
    id_fields = [self.DEPRECATED_TAG, self.org, self.course,
                 self.block_type, self.block_id, self.version_guid]
    id_string = u"-".join([v for v in id_fields if v is not None])
    return self.clean_for_html(id_string)
Return an id which can be used on an html page as an id attr of an html element . It is currently also persisted by some clients to identify blocks .
17,227
def to_deprecated_son(self, prefix='', tag='i4x'):
    """Return a SON object that represents this location."""
    son = SON({prefix + 'tag': tag})
    # Insertion order matters for SON: tag, org, course, category, name, revision.
    for field_name in ('org', 'course'):
        son[prefix + field_name] = getattr(self.course_key, field_name)
    for dep_field_name, field_name in [('category', 'block_type'), ('name', 'block_id')]:
        son[prefix + dep_field_name] = getattr(self, field_name)
    son[prefix + 'revision'] = self.course_key.branch
    return son
Returns a SON object that represents this location
17,228
def _from_deprecated_son(cls, id_dict, run):
    """Return the Location decoded from id_dict and run."""
    course_key = CourseLocator(
        id_dict['org'],
        id_dict['course'],
        run,
        id_dict['revision'],
        deprecated=True,
    )
    return cls(course_key, id_dict['category'], id_dict['name'], deprecated=True)
Return the Location decoding this id_dict and run
17,229
def _from_string(cls, serialized):
    """Deserialize the library part of serialized, then the block part.

    Raises InvalidKeyError when the block id or block type is missing.
    """
    library_key = LibraryLocator._from_string(serialized)
    parsed_parts = LibraryLocator.parse_url(serialized)
    block_id = parsed_parts.get('block_id', None)
    if block_id is None:
        raise InvalidKeyError(cls, serialized)
    block_type = parsed_parts.get('block_type')
    if block_type is None:
        raise InvalidKeyError(cls, serialized)
    # Reuse the already-validated block_type instead of re-reading the dict.
    return cls(library_key, block_type, block_id)
Requests LibraryLocator to deserialize its part and then adds the local deserialization of block
17,230
def for_branch(self, branch):
    """Return a UsageLocator for the same block on a different branch."""
    return self.replace(library_key=self.library_key.for_branch(branch))
Return a UsageLocator for the same block in a different branch of the library .
17,231
def for_version(self, version_guid):
    """Return a UsageLocator for the same block in a different version."""
    return self.replace(library_key=self.library_key.for_version(version_guid))
Return a UsageLocator for the same block in a different version of the library .
17,232
def _strip_object ( key ) : if hasattr ( key , 'version_agnostic' ) and hasattr ( key , 'for_branch' ) : return key . for_branch ( None ) . version_agnostic ( ) else : return key
Strips branch and version info if the given key supports those attributes .
17,233
def _strip_value(value, lookup='exact'):
    """Strip branch/version info from a single value or each item of a list."""
    if lookup == 'in':
        return [_strip_object(el) for el in value]
    return _strip_object(value)
Helper function to remove the branch and version information from the given value which could be a single object or a list .
17,234
def _deprecation_warning(cls):
    """Display a deprecation warning appropriate for the given cls."""
    if issubclass(cls, Location):
        message = "Location is deprecated! Please use locator.BlockUsageLocator"
    elif issubclass(cls, AssetLocation):
        message = "AssetLocation is deprecated! Please use locator.AssetLocator"
    else:
        message = "{} is deprecated!".format(cls)
    warnings.warn(message, DeprecationWarning, stacklevel=3)
Display a deprecation warning for the given cls
17,235
def _check_location_part(cls, val, regexp):
    """Deprecated. See CourseLocator._check_location_part."""
    cls._deprecation_warning()
    return CourseLocator._check_location_part(val, regexp)
Deprecated . See CourseLocator . _check_location_part
17,236
def _clean(cls, value, invalid):
    """Deprecated. See BlockUsageLocator._clean."""
    cls._deprecation_warning()
    return BlockUsageLocator._clean(value, invalid)
Deprecated . See BlockUsageLocator . _clean
17,237
def _join_keys_v1(left, right):
    """Join two keys into a format separable by _split_keys_v1."""
    if left.endswith(':') or '::' in left:
        raise ValueError("Can't join a left string ending in ':' or containing '::'")
    return u"{}::{}".format(_encode_v1(left), _encode_v1(right))
Join two keys into a format separable by using _split_keys_v1 .
17,238
def _split_keys_v1(joined):
    """Split out the two keys from a string created by _join_keys_v1."""
    # partition splits on the FIRST '::' occurrence.
    left, _, right = joined.partition('::')
    return _decode_v1(left), _decode_v1(right)
Split two keys out a string created by _join_keys_v1 .
17,239
def _split_keys_v2(joined):
    """Split out the two keys from a string created by _join_keys_v2."""
    # rpartition splits on the LAST '::' occurrence.
    left, _, right = joined.rpartition('::')
    return _decode_v2(left), _decode_v2(right)
Split two keys out a string created by _join_keys_v2 .
17,240
def refresher(name, refreshers=CompletionRefresher.refreshers):
    """Decorator that registers the decorated function under ``name``."""
    def wrapper(wrapped):
        refreshers[name] = wrapped
        return wrapped
    return wrapper
Decorator to add the decorated function to the dictionary of refreshers . Any function decorated with a
17,241
def refresh(self, executor, callbacks, completer_options=None):
    """Populate a SQLCompleter with suggestions in a background thread."""
    if completer_options is None:
        completer_options = {}
    if self.is_refreshing():
        self._restart_refresh.set()
        return [(None, None, None, 'Auto-completion refresh restarted.')]
    self._completer_thread = threading.Thread(
        target=self._bg_refresh,
        args=(executor, callbacks, completer_options),
        name='completion_refresh')
    self._completer_thread.setDaemon(True)
    self._completer_thread.start()
    return [(None, None, None,
             'Auto-completion refresh started in the background.')]
Creates a SQLCompleter object and populates it with the relevant completion suggestions in a background thread .
17,242
def handle_cd_command(arg):
    """Handle a cd shell command by calling Python's os.chdir."""
    # Everything after the last 'cd ' is the target directory.
    _, sep, tail = arg.rpartition('cd ')
    directory = tail if sep else None
    if not directory:
        return False, "No folder name was provided."
    try:
        os.chdir(directory)
        subprocess.call(['pwd'])  # echo the new working directory
        return True, None
    except OSError as error:
        return False, error.strerror
Handles a cd shell command by calling python s os . chdir .
17,243
def get_editor_query(sql):
    """Get the query part of an editor command, stripping \\e markers."""
    sql = sql.strip()
    # Remove a literal `\e` from the start or end, repeatedly, since the
    # marker may appear on both sides of the statement.
    pattern = re.compile(r'(^\\e|\\e$)')
    while pattern.search(sql):
        sql = pattern.sub('', sql)
    return sql
Get the query part of an editor command .
17,244
def delete_favorite_query(arg, **_):
    """Delete an existing favorite query."""
    usage = 'Syntax: \\fd name.\n\n' + favoritequeries.usage
    if not arg:
        return [(None, None, None, usage)]
    status = favoritequeries.delete(arg)
    return [(None, None, None, status)]
Delete an existing favorite query .
17,245
def execute_system_command(arg, **_):
    """Execute a system shell command."""
    usage = "Syntax: system [command].\n"
    if not arg:
        return [(None, None, None, usage)]
    try:
        command = arg.strip()
        if command.startswith('cd'):
            ok, error_message = handle_cd_command(arg)
            if not ok:
                return [(None, None, None, error_message)]
            return [(None, None, None, '')]
        args = arg.split(' ')
        process = subprocess.Popen(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output, error = process.communicate()
        response = output if not error else error
        # Python 3 pipes return bytes; decode with the locale's encoding.
        if isinstance(response, bytes):
            encoding = locale.getpreferredencoding(False)
            response = response.decode(encoding)
        return [(None, None, None, response)]
    except OSError as e:
        return [(None, None, None, 'OSError: %s' % e.strerror)]
Execute a system shell command .
17,246
def need_completion_refresh(queries):
    """Determine whether completions need a refresh.

    Returns True when any statement in ``queries`` starts with use/create/drop
    (or the \\u shortcut), i.e. when it may change the database schema.
    """
    tokens = {'use', '\\u', 'create', 'drop'}
    for query in sqlparse.split(queries):
        try:
            first_token = query.split()[0]
        except IndexError:
            # Empty statement (e.g. a stray ';'): skip it and keep checking
            # the remaining statements instead of aborting early.
            continue
        if first_token.lower() in tokens:
            return True
    return False
Determines if the completion needs a refresh by checking if the sql statement is an alter create drop or change db .
17,247
def is_mutating(status):
    """Determine if the statement is mutating based on the status line."""
    if not status:
        return False
    mutating = frozenset((
        'insert', 'update', 'delete', 'alter', 'create',
        'drop', 'replace', 'truncate', 'load'))
    return status.split(None, 1)[0].lower() in mutating
Determines if the statement is mutating based on the status .
17,248
def change_prompt_format(self, arg, **_):
    """Change the prompt format."""
    if not arg:
        message = 'Missing required argument, format.'
        return [(None, None, None, message)]
    self.prompt = self.get_prompt(arg)
    return [(None, None, None, "Changed prompt format to %s" % arg)]
Change the prompt format .
17,249
def get_output_margin(self, status=None):
    """Get the output margin: rows used by prompt, footer and timing line."""
    margin = (self.get_reserved_space()
              + self.get_prompt(self.prompt).count('\n') + 1)
    if special.is_timing_enabled():
        margin += 1
    if status:
        margin += 1 + status.count('\n')
    return margin
Get the output margin (the number of rows used by the prompt, footer and timing message).
17,250
# Render query results to stdout and/or a pager, and to the tee/output
# files when enabled. Lines are buffered while they still fit on one
# screen; once a line is too wide or too many rows were emitted, the
# output either switches to the pager (when enabled) or the buffer is
# flushed straight to stdout. The trailing status line goes to stdout
# only, never to the pager or the tee/output files.
# NOTE(review): kept byte-identical — the buffering branch's nesting is
# order-dependent and unsafe to restructure from this tokenized form.
def output ( self , output , status = None ) : if output : size = self . cli . output . get_size ( ) margin = self . get_output_margin ( status ) fits = True buf = [ ] output_via_pager = self . explicit_pager and special . is_pager_enabled ( ) for i , line in enumerate ( output , 1 ) : special . write_tee ( line ) special . write_once ( line ) if fits or output_via_pager : buf . append ( line ) if len ( line ) > size . columns or i > ( size . rows - margin ) : fits = False if not self . explicit_pager and special . is_pager_enabled ( ) : output_via_pager = True if not output_via_pager : for line in buf : click . secho ( line ) buf = [ ] else : click . secho ( line ) if buf : if output_via_pager : click . echo_via_pager ( "\n" . join ( buf ) ) else : for line in buf : click . secho ( line ) if status : click . secho ( status )
Output text to stdout or a pager command . The status text is not outputted to pager or files . The message will be logged in the audit log if enabled . The message will be written to the tee file if enabled . The message will be written to the output file if enabled .
17,251
def _on_completions_refreshed(self, new_completer):
    """Swap in the newly created completer and redraw the CLI."""
    with self._completer_lock:
        self.completer = new_completer
        if self.cli:
            self.cli.current_buffer.completer = new_completer
    if self.cli:
        self.cli.request_redraw()
Swap the completer object in cli with the newly created completer .
17,252
def get_reserved_space(self):
    """Get the number of lines to reserve for the completion menu."""
    reserved_space_ratio = .45
    max_reserved_space = 8
    _, height = click.get_terminal_size()
    # Reserve 45% of the terminal height, capped at 8 lines.
    return min(int(round(height * reserved_space_ratio)), max_reserved_space)
Get the number of lines to reserve for the completion menu .
17,253
def find_matches(text, collection, start_only=False, fuzzy=True, casing=None):
    """Find completion matches for the given text within collection.

    Matches against the last word of text; yields prompt_toolkit
    Completion instances, optionally re-cased per ``casing``.
    """
    last = last_word(text, include='most_punctuations')
    text = last.lower()
    completions = []
    if fuzzy:
        # Fuzzy: characters of text may be interleaved anywhere in the item.
        regex = '.*?'.join(map(escape, text))
        pat = compile('(%s)' % regex)
        for item in sorted(collection):
            r = pat.search(item.lower())
            if r:
                completions.append((len(r.group()), r.start(), item))
    else:
        match_end_limit = len(text) if start_only else None
        for item in sorted(collection):
            match_point = item.lower().find(text, 0, match_end_limit)
            if match_point >= 0:
                completions.append((len(text), match_point, item))
    if casing == 'auto':
        casing = 'lower' if last and last[-1].islower() else 'upper'

    def apply_case(kw):
        if casing == 'upper':
            return kw.upper()
        return kw.lower()

    return (Completion(z if casing is None else apply_case(z), -len(text))
            for x, y, z in sorted(completions))
Find completion matches for the given text. Given the user's input text and a collection of available completions, find completions matching the last word of the text. If start_only is True, the text will match an available completion only at the beginning; otherwise, a completion is considered a match if the text appears anywhere within it. Yields prompt_toolkit Completion instances for any matches found in the collection of available completions.
17,254
def log(logger, level, message):
    """Log message; fall back to stderr when logging isn't initialized."""
    if logger.parent.name != 'root':
        logger.log(level, message)
    else:
        print(message, file=sys.stderr)
Logs message to stderr if logging isn t initialized .
17,255
def read_config_file(f):
    """Read a config file; return partial config or None on errors."""
    # NOTE(review): `basestring` suggests Python 2 compatibility — confirm.
    if isinstance(f, basestring):
        f = os.path.expanduser(f)
    try:
        config = ConfigObj(f, interpolation=False, encoding='utf8')
    except ConfigObjError as e:
        log(LOGGER, logging.ERROR, "Unable to parse line {0} of config file "
            "'{1}'.".format(e.line_number, f))
        log(LOGGER, logging.ERROR, "Using successfully parsed config values.")
        return e.config
    except (IOError, OSError) as e:
        log(LOGGER, logging.WARNING, "You don't have permission to read "
            "config file '{0}'.".format(e.filename))
        return None
    return config
Read a config file .
17,256
def read_config_files(files):
    """Read and merge a list of config files."""
    config = ConfigObj()
    for _file in files:
        _config = read_config_file(_file)
        if _config:
            config.merge(_config)
            config.filename = _config.filename
    return config
Read and merge a list of config files .
17,257
def cli_bindings():
    """Custom key bindings for the cli."""
    key_binding_manager = KeyBindingManager(
        enable_open_in_editor=True,
        enable_system_bindings=True,
        enable_auto_suggest_bindings=True,
        enable_search=True,
        enable_abort_and_exit_bindings=True)

    @key_binding_manager.registry.add_binding(Keys.F2)
    def _(event):
        # Toggle smart completion on/off.
        _logger.debug('Detected F2 key.')
        buf = event.cli.current_buffer
        buf.completer.smart_completion = not buf.completer.smart_completion

    @key_binding_manager.registry.add_binding(Keys.F3)
    def _(event):
        # Toggle multiline mode.
        _logger.debug('Detected F3 key.')
        buf = event.cli.current_buffer
        buf.always_multiline = not buf.always_multiline

    @key_binding_manager.registry.add_binding(Keys.F4)
    def _(event):
        # Toggle between Vi and Emacs editing modes.
        _logger.debug('Detected F4 key.')
        if event.cli.editing_mode == EditingMode.VI:
            event.cli.editing_mode = EditingMode.EMACS
        else:
            event.cli.editing_mode = EditingMode.VI

    @key_binding_manager.registry.add_binding(Keys.Tab)
    def _(event):
        # Cycle completions, starting completion if none is active.
        _logger.debug('Detected <Tab> key.')
        b = event.cli.current_buffer
        if b.complete_state:
            b.complete_next()
        else:
            event.cli.start_completion(select_first=True)

    @key_binding_manager.registry.add_binding(Keys.ControlSpace)
    def _(event):
        # Start completion without pre-selecting the first match.
        _logger.debug('Detected <C-Space> key.')
        b = event.cli.current_buffer
        if b.complete_state:
            b.complete_next()
        else:
            event.cli.start_completion(select_first=False)

    @key_binding_manager.registry.add_binding(
            Keys.ControlJ, filter=HasSelectedCompletion())
    def _(event):
        # Accept the currently selected completion.
        _logger.debug('Detected <C-J> key.')
        event.current_buffer.complete_state = None
        b = event.cli.current_buffer
        b.complete_state = None

    return key_binding_manager
Custom key bindings for cli .
17,258
def prompt(*args, **kwargs):
    """Prompt the user for input; return False when the prompt is aborted."""
    try:
        return click.prompt(*args, **kwargs)
    except click.Abort:
        return False
Prompt the user for input and handle any abort exceptions .
17,259
def run(self, statement):
    """Execute the sql in the database and return the results.

    The statement may contain multiple queries; each is split out with
    sqlparse and executed in turn. Yields one ``(title, rows, headers,
    status)`` tuple per result set.
    """
    statement = statement.strip()
    if not statement:
        # Nothing to execute: emit an empty result tuple.
        yield (None, None, None, None)
    components = sqlparse.split(statement)
    for sql in components:
        # Strip the trailing semicolon; it is not part of the query.
        sql = sql.rstrip(';')
        if sql.endswith('\\G'):
            # \G requests vertically expanded output, like the mysql client.
            special.set_expanded_output(True)
            sql = sql[:-2].strip()
        cur = self.conn.cursor()
        try:
            # First try to run it as a client-side special command.
            for result in special.execute(cur, sql):
                yield result
        except special.CommandNotFound:
            # Not a special command: run it against the server.
            cur.execute(sql)
            yield self.get_result(cur)
Execute the sql in the database and return the results .
17,260
def get_result(self, cursor):
    """Get the current result's data from the cursor.

    Returns a ``(title, rows, headers, status)`` tuple. For statements
    that produce no result set (``cursor.description`` is None), rows and
    headers are None and the status is 'Query OK'.
    """
    title = headers = None
    if cursor.description is None:
        # No result set (e.g. DDL/DML): nothing to fetch.
        logger.debug('No rows in result.')
        rows = None
        status = 'Query OK'
    else:
        headers = [col[0] for col in cursor.description]
        rows = cursor.fetchall()
        status = '%d row%s in set' % (len(rows),
                                      '' if len(rows) == 1 else 's')
    return (title, rows, headers, status)
Get the current result's data from the cursor.
17,261
def tables(self):
    """Yields table names."""
    # The cursor is used as a context manager so it is always closed.
    with self.conn.cursor() as cur:
        cur.execute(self.TABLES_QUERY)
        for table_row in cur:
            yield table_row
Yields table names .
17,262
def table_columns(self):
    """Yields column names."""
    # The query is parameterized by the current database name.
    with self.conn.cursor() as cur:
        cur.execute(self.TABLE_COLUMNS_QUERY % self.database)
        for column_row in cur:
            yield column_row
Yields column names .
17,263
def create_toolbar_tokens_func(get_is_refreshing, show_fish_help):
    """Return a function that generates the toolbar tokens.

    Parameters
    ----------
    get_is_refreshing : callable
        Returns True while completions are being refreshed.
    show_fish_help : callable
        Returns True when the fish-style autosuggestion hint should be shown.
    """
    token = Token.Toolbar

    def get_toolbar_tokens(cli):
        result = []
        result.append((token, ' '))
        # Multiline indicator reflects the default buffer's setting.
        if cli.buffers[DEFAULT_BUFFER].always_multiline:
            result.append((token.On, '[F3] Multiline: ON '))
        else:
            result.append((token.Off, '[F3] Multiline: OFF '))
        if cli.buffers[DEFAULT_BUFFER].always_multiline:
            result.append((token, ' (Semi-colon [;] will end the line)'))
        # Show the vi sub-mode (insert/navigation/...) when in Vi mode.
        if cli.editing_mode == EditingMode.VI:
            result.append((token.On, 'Vi-mode ({})'.format(_get_vi_mode(cli))))
        if show_fish_help():
            result.append((token, ' Right-arrow to complete suggestion'))
        if get_is_refreshing():
            result.append((token, ' Refreshing completions...'))
        return result
    return get_toolbar_tokens
Return a function that generates the toolbar tokens .
17,264
def _get_vi_mode(cli):
    """Get the current vi mode, for display.

    Maps the prompt_toolkit vi input mode to a single-letter label.
    """
    mode_labels = {
        InputMode.INSERT: 'I',
        InputMode.NAVIGATION: 'N',
        InputMode.REPLACE: 'R',
        InputMode.INSERT_MULTIPLE: 'M',
    }
    return mode_labels[cli.vi_state.input_mode]
Get the current vi mode for display .
17,265
def export(defn):
    """Decorator to explicitly mark functions that are exposed in a lib.

    Registers the decorated object in the module globals and appends its
    name to ``__all__``, then returns it unchanged.
    """
    name = defn.__name__
    globals()[name] = defn
    __all__.append(name)
    return defn
Decorator to explicitly mark functions that are exposed in a lib .
17,266
def execute(cur, sql):
    """Execute a special command and return the results. If the special
    command is not supported a KeyError will be raised.
    """
    command, verbose, arg = parse_special_command(sql)
    # Reject anything that is not a registered command in either case form.
    if (command not in COMMANDS) and (command.lower() not in COMMANDS):
        raise CommandNotFound
    try:
        special_cmd = COMMANDS[command]
    except KeyError:
        # Fall back to the lowercase form, but only for commands that are
        # registered as case-insensitive.
        special_cmd = COMMANDS[command.lower()]
        if special_cmd.case_sensitive:
            raise CommandNotFound('Command not found: %s' % command)
    # 'help <keyword>' is dispatched to the keyword help viewer directly.
    if command == 'help' and arg:
        return show_keyword_help(cur=cur, arg=arg)
    # Dispatch on how much of the query the handler wants to see.
    if special_cmd.arg_type == NO_QUERY:
        return special_cmd.handler()
    elif special_cmd.arg_type == PARSED_QUERY:
        return special_cmd.handler(cur=cur, arg=arg, verbose=verbose)
    elif special_cmd.arg_type == RAW_QUERY:
        return special_cmd.handler(cur=cur, query=sql)
Execute a special command and return the results . If the special command is not supported a KeyError will be raised .
17,267
def find_prev_keyword(sql):
    """Find the last sql keyword in an SQL statement.

    Returns the value of the last keyword and the text of the query with
    everything after the last keyword stripped.
    """
    if not sql.strip():
        return None, ''

    statement = sqlparse.parse(sql)[0]
    tokens = list(statement.flatten())
    # Logical operators do not count as clause boundaries.
    logical_operators = ('AND', 'OR', 'NOT', 'BETWEEN')

    def is_boundary(tok):
        # An open paren or any non-logical keyword ends the search.
        return tok.value == '(' or (
            tok.is_keyword and tok.value.upper() not in logical_operators)

    for tok in reversed(tokens):
        if is_boundary(tok):
            cut = tokens.index(tok)
            text = ''.join(item.value for item in tokens[:cut + 1])
            return tok, text
    return None, ''
Find the last SQL keyword in an SQL statement. Returns the value of the last keyword and the text of the query with everything after the last keyword stripped.
17,268
def _get_thumbnail_options(self, context, instance):
    """Return the size and options of the thumbnail that should be inserted.

    Sizing priority: autoscale against the placeholder dimensions first,
    then explicit plugin dimensions, then the image's own dimensions,
    preserving aspect ratio when only one dimension is known.
    """
    width, height = None, None
    subject_location = False
    placeholder_width = context.get('width', None)
    placeholder_height = context.get('height', None)
    if instance.use_autoscale and placeholder_width:
        # Use the placeholder width as a hint for sizing.
        width = int(placeholder_width)
    if instance.use_autoscale and placeholder_height:
        height = int(placeholder_height)
    # NOTE(review): the elif below pairs with the placeholder-height check,
    # so explicit plugin dimensions only apply when autoscale did not
    # provide a height — confirm against upstream before relying on this.
    elif instance.width:
        width = instance.width
        if instance.height:
            height = instance.height
    if instance.image:
        if instance.image.subject_location:
            subject_location = instance.image.subject_location
        if not height and width:
            # Height not set: derive it from the width via the aspect ratio.
            height = int(float(width) * float(instance.image.height)
                         / float(instance.image.width))
        if not width and height:
            # Width not set: derive it from the height via the aspect ratio.
            width = int(float(height) * float(instance.image.width)
                        / float(instance.image.height))
        if not width:
            # Still unset: fall back to the image's own dimensions.
            width = instance.image.width
        if not height:
            height = instance.image.height
    return {'size': (width, height),
            'subject_location': subject_location}
Return the size and options of the thumbnail that should be inserted
17,269
def create_image_plugin(filename, image, parent_plugin, **kwargs):
    """Used for drag-n-drop image insertion with djangocms-text-ckeditor.

    Set TEXT_SAVE_IMAGE_FUNCTION to
    'cmsplugin_filer_image.integrations.ckeditor.create_image_plugin'
    to enable. Stores the uploaded image as a filer Image and attaches a
    FilerImage plugin to the parent plugin's placeholder.
    """
    # Imported locally to avoid import cycles at module load time.
    from cmsplugin_filer_image.models import FilerImage
    from filer.models import Image

    plugin = FilerImage()
    plugin.placeholder = parent_plugin.placeholder
    plugin.parent = CMSPlugin.objects.get(pk=parent_plugin.id)
    # Append after the parent's existing children.
    plugin.position = CMSPlugin.objects.filter(parent=parent_plugin).count()
    plugin.language = parent_plugin.language
    plugin.plugin_type = 'FilerImagePlugin'
    # Rewind the stream before reading the full contents.
    image.seek(0)
    stored_image = Image.objects.create(
        file=SimpleUploadedFile(name=filename, content=image.read()))
    plugin.image = stored_image
    plugin.save()
    return plugin
Used for drag - n - drop image insertion with djangocms - text - ckeditor . Set TEXT_SAVE_IMAGE_FUNCTION = cmsplugin_filer_image . integrations . ckeditor . create_image_plugin to enable .
17,270
def rename_tables(db, table_mapping, reverse=False):
    """Renames tables from source to destination name, if the source exists
    and the destination does not exist yet.
    """
    from django.db import connection
    if reverse:
        # Invert the mapping so the migration can be rolled back.
        table_mapping = [(dst, src) for src, dst in table_mapping]
    table_names = connection.introspection.table_names()
    for source, destination in table_mapping:
        source_exists = source in table_names
        destination_exists = destination in table_names
        if source_exists and destination_exists:
            print(u" WARNING: not renaming {0} to {1}, because both tables already exist.".format(source, destination))
        elif source_exists and not destination_exists:
            print(u" - renaming {0} to {1}".format(source, destination))
            db.rename_table(source, destination)
renames tables from source to destination name if the source exists and the destination does not exist yet .
17,271
def group_and_sort_statements(stmt_list, ev_totals=None):
    """Group statements by type and arguments, and sort by prevalence.

    Parameters
    ----------
    stmt_list : list
        Statements to group.
    ev_totals : dict or None
        Optional map from statement hash to a precomputed evidence total;
        when absent, len(stmt.evidence) is used.
    """
    def _count(stmt):
        # Evidence count: either the local evidence list or the supplied
        # totals keyed by statement hash.
        if ev_totals is None:
            return len(stmt.evidence)
        else:
            return ev_totals[stmt.get_hash()]

    stmt_rows = defaultdict(list)
    stmt_counts = defaultdict(lambda: 0)
    arg_counts = defaultdict(lambda: 0)
    for key, s in _get_keyed_stmts(stmt_list):
        # Gather statements by their (verb, args...) key.
        stmt_rows[key].append(s)
        # Keep track of the total evidence counts for this statement key.
        stmt_counts[key] += _count(s)
        # Argument-pair counts: Conversions are expanded to
        # (subject, object) pairs; all other keys use the args directly.
        if key[0] == 'Conversion':
            subj = key[1]
            for obj in key[2] + key[3]:
                arg_counts[(subj, obj)] += _count(s)
        else:
            arg_counts[key[1:]] += _count(s)

    def process_rows(stmt_rows):
        for key, stmts in stmt_rows.items():
            verb = key[0]
            inps = key[1:]
            sub_count = stmt_counts[key]
            arg_count = arg_counts[inps]
            # Suppress Complex groups that duplicate their argument pair
            # unless some member statement has only two distinct agents.
            if verb == 'Complex' and sub_count == arg_count \
                    and len(inps) <= 2:
                if all([len(set(ag.name for ag in s.agent_list())) > 2
                        for s in stmts]):
                    continue
            new_key = (arg_count, inps, sub_count, verb)
            # Order within a group by evidence count, breaking ties in
            # favor of statements with fewer agents.
            stmts = sorted(stmts,
                           key=lambda s: _count(s) + 1/(1+len(s.agent_list())),
                           reverse=True)
            yield new_key, verb, stmts

    sorted_groups = sorted(process_rows(stmt_rows),
                           key=lambda tpl: tpl[0], reverse=True)
    return sorted_groups
Group statements by type and arguments and sort by prevalence .
17,272
def make_stmt_from_sort_key(key, verb):
    """Make a Statement from the sort key.

    The verb selects the Statement class, and the names from the key are
    turned into bare Agents (with the literal 'None' mapping to None).
    """
    def make_agent(name):
        return None if name == 'None' or name is None else Agent(name)

    StmtClass = get_statement_by_name(verb)
    inps = list(key[1])
    if verb == 'Complex':
        return StmtClass([make_agent(name) for name in inps])
    if verb == 'Conversion':
        # Conversion takes a subject plus lists of from/to objects.
        return StmtClass(make_agent(inps[0]),
                         [make_agent(name) for name in inps[1]],
                         [make_agent(name) for name in inps[2]])
    if verb in ('ActiveForm', 'HasActivity'):
        # The second and third elements are passed through unchanged.
        return StmtClass(make_agent(inps[0]), inps[1], inps[2])
    return StmtClass(*[make_agent(name) for name in inps])
Make a Statement from the sort key .
17,273
def get_ecs_cluster_for_queue(queue_name, batch_client=None):
    """Get the name of the ecs cluster using the batch client.

    Resolves queue -> compute environment -> ECS cluster ARN, raising
    BatchReadingError when any step does not yield exactly one result.
    """
    if batch_client is None:
        batch_client = boto3.client('batch')

    queue_resp = batch_client.describe_job_queues(jobQueues=[queue_name])
    queues = queue_resp['jobQueues']
    if len(queues) != 1:
        raise BatchReadingError('Error finding queue with name %s.'
                                % queue_name)
    queue = queues[0]

    env_order = queue['computeEnvironmentOrder']
    if len(env_order) != 1:
        raise BatchReadingError('Error finding the compute environment name '
                                'for %s.' % queue_name)
    compute_env_name = env_order[0]['computeEnvironment']

    compute_envs = batch_client.describe_compute_environments(
        computeEnvironments=[compute_env_name])['computeEnvironments']
    if len(compute_envs) != 1:
        raise BatchReadingError("Error getting compute environment %s for %s. "
                                "Got %d environments instead of 1."
                                % (compute_env_name, queue_name,
                                   len(compute_envs)))
    compute_env = compute_envs[0]

    # The cluster name is the last path component of the ARN.
    return os.path.basename(compute_env['ecsClusterArn'])
Get the name of the ecs cluster using the batch client .
17,274
def tag_instances_on_cluster(cluster_name, project='cwc'):
    """Adds project tag to untagged instances in a given cluster."""
    ecs = boto3.client('ecs')

    # Nothing to do if the cluster has no running tasks.
    task_arns = ecs.list_tasks(cluster=cluster_name)['taskArns']
    if not task_arns:
        return

    # Resolve tasks -> container instances -> EC2 instance ids.
    tasks = ecs.describe_tasks(cluster=cluster_name, tasks=task_arns)['tasks']
    instance_arns = [task['containerInstanceArn'] for task in tasks]
    container_instances = ecs.describe_container_instances(
        cluster=cluster_name,
        containerInstances=instance_arns)['containerInstances']

    for container_instance in container_instances:
        tag_instance(container_instance['ec2InstanceId'], project=project)
    return
Adds project tag to untagged instances in a given cluster .
17,275
def submit_reading(basename, pmid_list_filename, readers, start_ix=None,
                   end_ix=None, pmids_per_job=3000, num_tries=2,
                   force_read=False, force_fulltext=False, project_name=None):
    """Submit an old-style, pmid-centered, no-database, S3-only reading job.

    Thin convenience wrapper around PmidSubmitter; returns the resulting
    list of job descriptors.
    """
    submitter = PmidSubmitter(basename, readers, project_name)
    submitter.set_options(force_read, force_fulltext)
    submitter.submit_reading(pmid_list_filename, start_ix, end_ix,
                             pmids_per_job, num_tries)
    return submitter.job_list
Submit an old-style, pmid-centered, no-database, S3-only reading job.
17,276
def submit_combine(basename, readers, job_ids=None, project_name=None):
    """Submit a batch job to combine the outputs of a reading job.

    Returns the PmidSubmitter used, so callers can inspect or wait on it.
    """
    submitter = PmidSubmitter(basename, readers, project_name)
    submitter.job_list = job_ids
    submitter.submit_combine()
    return submitter
Submit a batch job to combine the outputs of a reading job .
17,277
def submit_reading(self, input_fname, start_ix, end_ix, ids_per_job,
                   num_tries=1, stagger=0):
    """Submit a batch of reading jobs.

    Uploads the id list to S3, then submits one AWS Batch job per slice of
    ``ids_per_job`` ids between start_ix and end_ix, optionally staggering
    submissions by ``stagger`` seconds. Returns the list of job-id dicts.
    """
    self.ids_per_job = ids_per_job

    # Stage the input id list on S3 where the jobs will read it.
    id_list_key = 'reading_results/%s/%s' % (self.basename,
                                             self._s3_input_name)
    s3_client = boto3.client('s3')
    s3_client.upload_file(input_fname, bucket_name, id_list_key)

    # Default the index window to the full file.
    if end_ix is None:
        with open(input_fname, 'rt') as f:
            lines = f.readlines()
        end_ix = len(lines)
    if start_ix is None:
        start_ix = 0

    environment_vars = get_environment()
    batch_client = boto3.client('batch', region_name='us-east-1')
    job_list = []
    for job_start_ix in range(start_ix, end_ix, ids_per_job):
        # Optionally pause between submissions to avoid a thundering herd.
        sleep(stagger)
        job_end_ix = job_start_ix + ids_per_job
        if job_end_ix > end_ix:
            job_end_ix = end_ix
        job_name, cmd = self._make_command(job_start_ix, job_end_ix)
        command_list = get_batch_command(cmd, purpose=self._purpose,
                                         project=self.project_name)
        logger.info('Command list: %s' % str(command_list))
        job_info = batch_client.submit_job(
            jobName=job_name,
            jobQueue=self._job_queue,
            jobDefinition=self._job_def,
            containerOverrides={'environment': environment_vars,
                                'command': command_list},
            retryStrategy={'attempts': num_tries})
        logger.info("submitted...")
        job_list.append({'jobId': job_info['jobId']})
    self.job_list = job_list
    return job_list
Submit a batch of reading jobs
17,278
def watch_and_wait(self, poll_interval=10, idle_log_timeout=None,
                   kill_on_timeout=False, stash_log_method=None,
                   tag_instances=False, **kwargs):
    """This provides shortcut access to the wait_for_complete_function.

    Parameters are forwarded to ``wait_for_complete`` for this submitter's
    job queue and job list; note that ``kill_on_timeout`` maps to the
    ``kill_on_log_timeout`` keyword there.
    """
    return wait_for_complete(self._job_queue, job_list=self.job_list,
                             job_name_prefix=self.basename,
                             poll_interval=poll_interval,
                             idle_log_timeout=idle_log_timeout,
                             kill_on_log_timeout=kill_on_timeout,
                             stash_log_method=stash_log_method,
                             tag_instances=tag_instances, **kwargs)
This provides shortcut access to the wait_for_complete_function .
17,279
def run(self, input_fname, ids_per_job, stagger=0, **wait_params):
    """Run this submission all the way.

    Submits the reading jobs on a background daemon thread (so staggered
    submission can overlap with monitoring) and blocks on watch_and_wait
    until the jobs complete.

    Parameters
    ----------
    input_fname : str
        Path to the id list file to submit.
    ids_per_job : int
        Number of ids handled by each batch job.
    stagger : int or float
        Seconds to wait between individual job submissions.
    **wait_params
        Forwarded to ``watch_and_wait``.
    """
    submit_thread = Thread(target=self.submit_reading,
                           args=(input_fname, 0, None, ids_per_job),
                           kwargs={'stagger': stagger},
                           daemon=True)
    submit_thread.start()
    self.watch_and_wait(**wait_params)
    # A zero-timeout join just refreshes the thread's liveness state.
    submit_thread.join(0)
    if submit_thread.is_alive():
        # FIX: the original adjacent string literals rendered as
        # "...after jobcompletion." — a space was missing.
        logger.warning("Submit thread is still running even after job "
                       "completion.")
    return
Run this submission all the way .
17,280
def set_options(self, force_read=False, force_fulltext=False):
    """Set the options for this run.

    Records the force_read and force_fulltext flags in ``self.options``.
    """
    self.options.update(force_read=force_read,
                        force_fulltext=force_fulltext)
    return
Set the options for this run .
17,281
def get_chebi_name_from_id(chebi_id, offline=False):
    """Return a ChEBI name corresponding to the given ChEBI ID.

    The local lookup table is consulted first; unless ``offline`` is set,
    a miss falls back to the ChEBI web service.
    """
    name = chebi_id_to_name.get(chebi_id)
    if name is None and not offline:
        name = get_chebi_name_from_id_web(chebi_id)
    return name
Return a ChEBI name corresponding to the given ChEBI ID .
17,282
def get_chebi_name_from_id_web(chebi_id):
    """Return a ChEBI name corresponding to a given ChEBI ID using a REST API.

    Parameters
    ----------
    chebi_id : str
        The ChEBI ID to look up.

    Returns
    -------
    str or None
        The entity's ASCII name, or None if the request fails or the name
        is not present in the response.
    """
    url_base = 'http://www.ebi.ac.uk/webservices/chebi/2.0/test/'
    url_fmt = url_base + 'getCompleteEntity?chebiId=%s'
    resp = requests.get(url_fmt % chebi_id)
    if resp.status_code != 200:
        # FIX: corrected typo in the log message ("form" -> "from").
        logger.warning("Got bad code from CHEBI client: %s"
                       % resp.status_code)
        return None
    tree = etree.fromstring(resp.content)
    # Strip namespace prefixes from every tag so the plain-path find()
    # below works. tree.iter() replaces the deprecated getiterator().
    for elem in tree.iter():
        if not hasattr(elem.tag, 'find'):
            # Comments and processing instructions have non-string tags.
            continue
        i = elem.tag.find('}')
        if i >= 0:
            elem.tag = elem.tag[i + 1:]
    objectify.deannotate(tree, cleanup_namespaces=True)
    elem = tree.find('Body/getCompleteEntityResponse/return/chebiAsciiName')
    if elem is not None:
        return elem.text
    return None
Return a ChEBI name corresponding to a given ChEBI ID using a REST API.
17,283
def get_subnetwork(statements, nodes, relevance_network=None,
                   relevance_node_lim=10):
    """Return a PySB model based on a subset of given INDRA Statements.

    When a relevance network is given, the node list is extended with up
    to ``relevance_node_lim`` related nodes before filtering.
    """
    if relevance_network is None:
        all_nodes = nodes
    else:
        relevant_nodes = _find_relevant_nodes(nodes, relevance_network,
                                              relevance_node_lim)
        all_nodes = nodes + relevant_nodes
    filtered_statements = _filter_statements(statements, all_nodes)
    assembler = PysbAssembler()
    assembler.add_statements(filtered_statements)
    return assembler.make_model()
Return a PySB model based on a subset of given INDRA Statements .
17,284
def _filter_statements ( statements , agents ) : filtered_statements = [ ] for s in stmts : if all ( [ a is not None for a in s . agent_list ( ) ] ) and all ( [ a . name in agents for a in s . agent_list ( ) ] ) : filtered_statements . append ( s ) return filtered_statements
Return INDRA Statements which have Agents in the given list .
17,285
def _find_relevant_nodes(query_nodes, relevance_network, relevance_node_lim):
    """Return a list of nodes that are relevant for the query.

    The relevance service returns scored nodes; only the names of the top
    ``relevance_node_lim`` entries are kept.
    """
    scored_nodes = relevance_client.get_relevant_nodes(relevance_network,
                                                       query_nodes)
    return [entry[0] for entry in scored_nodes[:relevance_node_lim]]
Return a list of nodes that are relevant for the query .
17,286
def process_jsonld_file(fname):
    """Process a JSON-LD file in the new format to extract Statements.

    The file is parsed as JSON and handed to ``process_jsonld``.
    """
    with open(fname, 'r') as fh:
        jsonld = json.load(fh)
    return process_jsonld(jsonld)
Process a JSON - LD file in the new format to extract Statements .
17,287
def tag_instance(instance_id, **tags):
    """Tag a single ec2 instance.

    Only tags with truthy keys and values are applied, and any key the
    instance already carries is left untouched.
    """
    logger.debug("Got request to add tags %s to instance %s."
                 % (str(tags), instance_id))
    ec2 = boto3.resource('ec2')
    instance = ec2.Instance(instance_id)

    # Drop empty keys/values up front.
    new_tags = {key: value for key, value in tags.items() if value and key}

    # Never overwrite a tag that already exists on the instance.
    if instance.tags is not None:
        existing_tags = {tag.get('Key'): tag.get('Value')
                         for tag in instance.tags}
        logger.debug("Ignoring existing tags; %s" % str(existing_tags))
        for existing_key in existing_tags.keys():
            new_tags.pop(existing_key, None)

    tag_list = [{'Key': key, 'Value': value}
                for key, value in new_tags.items()]
    if tag_list:
        logger.info('Adding project tags "%s" to instance %s'
                    % (new_tags, instance_id))
        instance.create_tags(Tags=tag_list)
    else:
        logger.info('No new tags from: %s' % str(tags))
    return
Tag a single ec2 instance .
17,288
def tag_myself(project='cwc', **other_tags):
    """Function run when indra is used in an EC2 instance to apply tags.

    The instance id is discovered via the EC2 instance metadata service,
    which is only reachable from inside an instance.
    """
    base_url = "http://169.254.169.254"
    try:
        resp = requests.get(base_url + "/latest/meta-data/instance-id")
    except requests.exceptions.ConnectionError:
        logger.warning("Could not connect to service. Note this should only "
                       "be run from within a batch job.")
        return
    tag_instance(resp.text, project=project, **other_tags)
    return
Function run when indra is used in an EC2 instance to apply tags .
17,289
def get_batch_command(command_list, project=None, purpose=None):
    """Get the command appropriate for running something on batch.

    Wraps the given command in the indra batch-runner invocation and
    appends project/purpose flags. When no project is given, the
    DEFAULT_AWS_PROJECT config value is used if available.
    """
    joined_command = ' '.join(command_list)
    batch_command = ['python', '-m', 'indra.util.aws', 'run_in_batch',
                     joined_command]
    if not project and has_config('DEFAULT_AWS_PROJECT'):
        project = get_config('DEFAULT_AWS_PROJECT')
    if project:
        batch_command += ['--project', project]
    if purpose:
        batch_command += ['--purpose', purpose]
    return batch_command
Get the command appropriate for running something on batch .
17,290
def get_jobs(job_queue='run_reach_queue', job_status='RUNNING'):
    """Returns a list of dicts with jobName and jobId for each job with the
    given status.
    """
    batch = boto3.client('batch')
    listing = batch.list_jobs(jobQueue=job_queue, jobStatus=job_status)
    return listing.get('jobSummaryList')
Returns a list of dicts with jobName and jobId for each job with the given status .
17,291
def get_job_log(job_info, log_group_name='/aws/batch/job', write_file=True,
                verbose=False):
    """Gets the Cloudwatch log associated with the given job.

    Parameters
    ----------
    job_info : dict
        Must contain 'jobName' and 'jobId' keys.
    log_group_name : str
        The Cloudwatch log group to search.
    write_file : bool
        If True, also dump the log to '<jobName>_<jobId>.log'.
    verbose : bool
        Passed through to the log download for progress reporting.

    Returns
    -------
    list of str or None
        The log lines, or None when no stream is found.
    """
    job_name = job_info['jobName']
    job_id = job_info['jobId']
    logs = boto3.client('logs')
    batch = boto3.client('batch')
    resp = batch.describe_jobs(jobs=[job_id])
    job_desc = resp['jobs'][0]
    # Batch log streams are named '<jobDefName>/default/<taskArnId>'.
    job_def_name = job_desc['jobDefinition'].split('/')[-1].split(':')[0]
    task_arn_id = job_desc['container']['taskArn'].split('/')[-1]
    log_stream_name = '%s/default/%s' % (job_def_name, task_arn_id)
    stream_resp = logs.describe_log_streams(
        logGroupName=log_group_name,
        logStreamNamePrefix=log_stream_name)
    streams = stream_resp.get('logStreams')
    if not streams:
        logger.warning('No streams for job')
        return None
    elif len(streams) > 1:
        # Ambiguous prefix match; take the first stream.
        logger.warning('More than 1 stream for job, returning first')
    log_stream_name = streams[0]['logStreamName']
    if verbose:
        logger.info("Getting log for %s/%s" % (job_name, job_id))
    out_file = ('%s_%s.log' % (job_name, job_id)) if write_file else None
    lines = get_log_by_name(log_group_name, log_stream_name, out_file,
                            verbose)
    return lines
Gets the Cloudwatch log associated with the given job .
17,292
def get_log_by_name(log_group_name, log_stream_name, out_file=None,
                    verbose=True):
    """Download a log given the log's group and stream name.

    Pages through Cloudwatch ``get_log_events`` from the head of the
    stream, optionally writing the collected lines to ``out_file``.

    Returns
    -------
    list of str
        One '<timestamp>: <message>\\n' string per log event.
    """
    logs = boto3.client('logs')
    kwargs = {'logGroupName': log_group_name,
              'logStreamName': log_stream_name,
              'startFromHead': True}
    lines = []
    while True:
        response = logs.get_log_events(**kwargs)
        # When the forward token stops advancing we have read everything.
        if response.get('nextForwardToken') == kwargs.get('nextToken'):
            break
        else:
            events = response.get('events')
            if events:
                lines += ['%s: %s\n' % (evt['timestamp'], evt['message'])
                          for evt in events]
            kwargs['nextToken'] = response.get('nextForwardToken')
        if verbose:
            logger.info('%d %s' % (len(lines), lines[-1]))
    if out_file:
        with open(out_file, 'wt') as f:
            for line in lines:
                f.write(line)
    return lines
Download a log given the log s group and stream name .
17,293
def dump_logs(job_queue='run_reach_queue', job_status='RUNNING'):
    """Write logs for all jobs with the given status to files."""
    for job in get_jobs(job_queue, job_status):
        get_job_log(job, write_file=True)
Write logs for all jobs with the given status to files.
17,294
def get_s3_file_tree(s3, bucket, prefix):
    """Overcome s3 response limit and return NestedDict tree of paths.

    Pages through ``list_objects`` (which caps at 1000 keys per call)
    using the last key seen as the Marker, then arranges all keys under
    the prefix into a NestedDict keyed by path component, with each leaf
    holding the full key under 'key'.

    Parameters
    ----------
    s3 : boto3 S3 client
    bucket : str
    prefix : str

    Returns
    -------
    NestedDict
        The tree of paths below the prefix.
    """
    def get_some_keys(keys, marker=None):
        if marker:
            relevant_files = s3.list_objects(Bucket=bucket, Prefix=prefix,
                                             Marker=marker)
        else:
            relevant_files = s3.list_objects(Bucket=bucket, Prefix=prefix)
        # FIX: 'Contents' is absent from the response when no keys match
        # the prefix; treat that as an empty page instead of a KeyError.
        keys.extend([entry['Key']
                     for entry in relevant_files.get('Contents', [])
                     if entry['Key'] != marker])
        return relevant_files['IsTruncated']

    file_keys = []
    marker = None
    while get_some_keys(file_keys, marker):
        # Continue from the last key returned so far.
        marker = file_keys[-1]

    file_tree = NestedDict()
    pref_path = prefix.split('/')[:-1]
    for key in file_keys:
        full_path = key.split('/')
        # Only the path components below the prefix form the tree.
        relevant_path = full_path[len(pref_path):]
        curr = file_tree
        for step in relevant_path:
            curr = curr[step]
        curr['key'] = key
    return file_tree
Overcome s3 response limit and return NestedDict tree of paths .
17,295
def print_model(self, include_unsigned_edges=False):
    """Return a SIF string of the assembled model.

    Each edge becomes a line '<source> <rel> <target>' where rel is 1 for
    positive polarity, -1 for negative, and 0 for unsigned edges (the
    latter only when ``include_unsigned_edges`` is True; otherwise they
    are skipped).
    """
    sif_lines = []
    for source, target, data in self.graph.edges(data=True):
        polarity = data.get('polarity')
        if polarity == 'negative':
            rel = '-1'
        elif polarity == 'positive':
            rel = '1'
        elif include_unsigned_edges:
            rel = '0'
        else:
            # Unsigned edge and unsigned output not requested: skip it.
            continue
        sif_lines.append('%s %s %s\n' % (source, rel, target))
    return ''.join(sif_lines)
Return a SIF string of the assembled model .
17,296
def save_model(self, fname, include_unsigned_edges=False):
    """Save the assembled model's SIF string into a file.

    The SIF text is UTF-8 encoded and written in binary mode.
    """
    with open(fname, 'wb') as out:
        out.write(self.print_model(include_unsigned_edges).encode('utf-8'))
Save the assembled model's SIF string into a file.
17,297
def print_boolean_net(self, out_file=None):
    """Return a Boolean network from the assembled graph.

    Produces an initial section setting every node to False, followed by
    one update rule per node with incoming edges. Positive parents are
    OR-ed, negative parents are OR-ed and negated, and the two are
    combined with AND. Optionally writes the result to ``out_file``.
    """
    # Initial condition: every node starts out False.
    init_str = ''
    for node_key in self.graph.nodes():
        node_name = self.graph.node[node_key]['name']
        init_str += '%s = False\n' % node_name
    rule_str = ''
    for node_key in self.graph.nodes():
        node_name = self.graph.node[node_key]['name']
        in_edges = self.graph.in_edges(node_key)
        if not in_edges:
            # Nodes without regulators get no update rule.
            continue
        parents = [e[0] for e in in_edges]
        polarities = [self.graph.edge[e[0]][node_key]['polarity']
                      for e in in_edges]
        pos_parents = [par for par, pol in zip(parents, polarities)
                       if pol == 'positive']
        neg_parents = [par for par, pol in zip(parents, polarities)
                       if pol == 'negative']
        rhs_pos_parts = []
        for par in pos_parents:
            rhs_pos_parts.append(self.graph.node[par]['name'])
        rhs_pos_str = ' or '.join(rhs_pos_parts)
        rhs_neg_parts = []
        for par in neg_parents:
            rhs_neg_parts.append(self.graph.node[par]['name'])
        rhs_neg_str = ' or '.join(rhs_neg_parts)
        # Combine: (activators) and not (inhibitors), degenerating
        # gracefully when one side is empty.
        if rhs_pos_str:
            if rhs_neg_str:
                rhs_str = '(' + rhs_pos_str + \
                          ') and not (' + rhs_neg_str + ')'
            else:
                rhs_str = rhs_pos_str
        else:
            rhs_str = 'not (' + rhs_neg_str + ')'
        node_eq = '%s* = %s\n' % (node_name, rhs_str)
        rule_str += node_eq
    full_str = init_str + '\n' + rule_str
    if out_file is not None:
        with open(out_file, 'wt') as fh:
            fh.write(full_str)
    return full_str
Return a Boolean network from the assembled graph .
17,298
def _ensure_api_keys(task_desc, failure_ret=None):
    """Wrap Elsevier methods which directly use the API keys.

    Returns a decorator that lazily loads the Elsevier API keys into the
    module-level ELSEVIER_KEYS dict on first use. If the required API key
    cannot be found, the wrapped function is not called and
    ``failure_ret`` is returned instead.
    """
    def check_func_wrapper(func):
        @wraps(func)
        def check_api_keys(*args, **kwargs):
            global ELSEVIER_KEYS
            if ELSEVIER_KEYS is None:
                # First call: populate the key cache from configuration.
                ELSEVIER_KEYS = {}
                if not has_config(INST_KEY_ENV_NAME):
                    # The institution token is optional, but its absence
                    # limits access.
                    logger.warning('Institution API key %s not found in '
                                   'config file or environment variable: '
                                   'this will limit access for %s'
                                   % (INST_KEY_ENV_NAME, task_desc))
                ELSEVIER_KEYS['X-ELS-Insttoken'] = \
                    get_config(INST_KEY_ENV_NAME)
                if not has_config(API_KEY_ENV_NAME):
                    # The API key is mandatory; abort the wrapped call.
                    logger.error('API key %s not found in configuration '
                                 'file or environment variable: cannot %s'
                                 % (API_KEY_ENV_NAME, task_desc))
                    return failure_ret
                ELSEVIER_KEYS['X-ELS-APIKey'] = get_config(API_KEY_ENV_NAME)
            elif 'X-ELS-APIKey' not in ELSEVIER_KEYS.keys():
                # Cache exists but lacks the API key from a prior failure.
                logger.error('No Elsevier API key %s found: cannot %s'
                             % (API_KEY_ENV_NAME, task_desc))
                return failure_ret
            return func(*args, **kwargs)
        return check_api_keys
    return check_func_wrapper
Wrap Elsevier methods which directly use the API keys .
17,299
def check_entitlement(doi):
    """Check whether IP and credentials enable access to content for a doi.

    Returns True when the entitlement endpoint responds with HTTP 200,
    False otherwise.
    """
    # Normalize a 'doi:'-prefixed identifier to a bare DOI.
    if doi.lower().startswith('doi:'):
        doi = doi[4:]
    url = '%s/%s' % (elsevier_entitlement_url, doi)
    params = {'httpAccept': 'text/xml'}
    res = requests.get(url, params, headers=ELSEVIER_KEYS)
    if res.status_code != 200:
        logger.error('Could not check entitlements for article %s: '
                     'status code %d' % (doi, res.status_code))
        logger.error('Response content: %s' % res.text)
        return False
    return True
Check whether IP and credentials enable access to content for a doi .