idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
6,300 | def _onGlobal ( self , name , line , pos , absPosition , level ) : for item in self . globals : if item . name == name : return self . globals . append ( Global ( name , line , pos , absPosition ) ) | Memorizes a global variable |
6,301 | def _onClass ( self , name , line , pos , absPosition , keywordLine , keywordPos , colonLine , colonPos , level ) : self . __flushLevel ( level ) c = Class ( name , line , pos , absPosition , keywordLine , keywordPos , colonLine , colonPos ) if self . __lastDecorators is not None : c . decorators = self . __lastDecorators self . __lastDecorators = None self . objectsStack . append ( c ) | Memorizes a class |
6,302 | def _onWhat ( self , name , line , pos , absPosition ) : self . __lastImport . what . append ( ImportWhat ( name , line , pos , absPosition ) ) | Memorizes an imported item |
6,303 | def _onClassAttribute ( self , name , line , pos , absPosition , level ) : attributes = self . objectsStack [ level ] . classAttributes for item in attributes : if item . name == name : return attributes . append ( ClassAttribute ( name , line , pos , absPosition ) ) | Memorizes a class attribute |
6,304 | def _onInstanceAttribute ( self , name , line , pos , absPosition , level ) : attributes = self . objectsStack [ level - 1 ] . instanceAttributes for item in attributes : if item . name == name : return attributes . append ( InstanceAttribute ( name , line , pos , absPosition ) ) | Memorizes a class instance attribute |
6,305 | def _onArgument ( self , name , annotation ) : self . objectsStack [ - 1 ] . arguments . append ( Argument ( name , annotation ) ) | Memorizes a function argument |
6,306 | def _onError ( self , message ) : self . isOK = False if message . strip ( ) != "" : self . errors . append ( message ) | Memorizies a parser error message |
6,307 | def _onLexerError ( self , message ) : self . isOK = False if message . strip ( ) != "" : self . lexerErrors . append ( message ) | Memorizes a lexer error message |
6,308 | def gen_mapname ( ) : filepath = None while ( filepath is None ) or ( os . path . exists ( os . path . join ( config [ 'mapfiles_dir' ] , filepath ) ) ) : filepath = '%s.map' % _gen_string ( ) return filepath | Generate a uniq mapfile pathname . |
6,309 | def config_content ( self , command , vars ) : settable_vars = [ var ( 'db_url' , 'Database url for sqlite, postgres or mysql' , default = 'sqlite:///%(here)s/studio.db' ) , var ( 'ms_url' , 'Url to the mapserv CGI' , default = 'http://localhost/cgi-bin/mapserv' ) , var ( 'admin_password' , 'Password for default admin user' , default = secret . secret_string ( length = 8 ) ) ] for svar in settable_vars : if command . interactive : prompt = 'Enter %s' % svar . full_description ( ) response = command . challenge ( prompt , svar . default , svar . should_echo ) vars [ svar . name ] = response else : if not vars . has_key ( svar . name ) : vars [ svar . name ] = svar . default vars [ 'cookie_secret' ] = secret . secret_string ( ) return super ( StudioInstaller , self ) . config_content ( command , vars ) | Called by self . write_config this returns the text content for the config file given the provided variables . |
6,310 | def _resolve_definitions ( self , schema , definitions ) : if not definitions : return schema if not isinstance ( schema , dict ) : return schema ref = schema . pop ( '$ref' , None ) if ref : path = ref . split ( '/' ) [ 2 : ] definition = definitions for component in path : definition = definitions [ component ] if definition : for ( key , val ) in six . iteritems ( definition ) : if key not in schema : schema [ key ] = val for key in six . iterkeys ( schema ) : schema [ key ] = self . _resolve_definitions ( schema [ key ] , definitions ) return schema | Interpolates definitions from the top - level definitions key into the schema . This is performed in a cut - down way similar to JSON schema . |
6,311 | def _get_serializer ( self , _type ) : if _type in _serializers : return _serializers [ _type ] elif _type == 'array' : return self . _get_array_serializer ( ) elif _type == 'object' : return self . _get_object_serializer ( ) raise ValueError ( 'Unknown type: {}' . format ( _type ) ) | Gets a serializer for a particular type . For primitives returns the serializer from the module - level serializers . For arrays and objects uses the special _get_T_serializer methods to build the encoders and decoders . |
6,312 | def _get_array_serializer ( self ) : if not self . _items : raise ValueError ( 'Must specify \'items\' for \'array\' type' ) field = SchemaField ( self . _items ) def encode ( value , field = field ) : if not isinstance ( value , list ) : value = [ value ] return [ field . encode ( i ) for i in value ] def decode ( value , field = field ) : return [ field . decode ( i ) for i in value ] return ( encode , decode ) | Gets the encoder and decoder for an array . Uses the items key to build the encoders and decoders for the specified type . |
6,313 | def encode ( self , value ) : if value is None and self . _default is not None : value = self . _default for encoder in self . _encoders : try : return encoder ( value ) except ValueError as ex : pass raise ValueError ( 'Value \'{}\' is invalid. {}' . format ( value , ex . message ) ) | The encoder for this schema . Tries each encoder in order of the types specified for this schema . |
6,314 | def decode ( self , value ) : has_null_encoder = bool ( encode_decode_null in self . _decoders ) if value is None and self . _default is not None and not has_null_encoder : value = self . _default for decoder in self . _decoders : try : return decoder ( value ) except ValueError as ex : pass raise ValueError ( 'Value \'{}\' is invalid. {}' . format ( value , ex . message ) ) | The decoder for this schema . Tries each decoder in order of the types specified for this schema . |
6,315 | def _fail_early ( message , ** kwds ) : import json output = dict ( kwds ) output . update ( { 'msg' : message , 'failed' : True , } ) print ( json . dumps ( output ) ) sys . exit ( 1 ) | The module arguments are dynamically generated based on the Opsview version . This means that fail_json isn t available until after the module has been properly initialized and the schemas have been loaded . |
6,316 | def _compare_recursive ( old , new ) : if isinstance ( new , dict ) : for key in six . iterkeys ( new ) : try : if _compare_recursive ( old [ key ] , new [ key ] ) : return True except ( KeyError , TypeError ) : return True elif isinstance ( new , list ) or isinstance ( new , tuple ) : for i , item in enumerate ( new ) : try : if _compare_recursive ( old [ i ] , item ) : return True except ( IndexError , TypeError ) : return True else : return old != new return False | Deep comparison between objects ; assumes that new contains user defined parameters so only keys which exist in new will be compared . Returns True if they differ . Else False . |
6,317 | def _requires_update ( self , old_object , new_object ) : old_encoded = self . manager . _encode ( old_object ) new_encoded = self . manager . _encode ( new_object ) return _compare_recursive ( old_encoded , new_encoded ) | Checks whether the old object and new object differ ; only checks keys which exist in the new object |
6,318 | def url2domain ( url ) : parsed_uri = urlparse . urlparse ( url ) domain = '{uri.netloc}' . format ( uri = parsed_uri ) domain = re . sub ( "^.+@" , "" , domain ) domain = re . sub ( ":.+$" , "" , domain ) return domain | extract domain from url |
6,319 | def init_model ( engine ) : if meta . Session is None : sm = orm . sessionmaker ( autoflush = True , autocommit = False , bind = engine ) meta . engine = engine meta . Session = orm . scoped_session ( sm ) | Call me before using any of the tables or classes in the model |
6,320 | def register_prop ( name , handler_get , handler_set ) : global props_get , props_set if handler_get : props_get [ name ] = handler_get if handler_set : props_set [ name ] = handler_set | register a property handler |
6,321 | def retrieve_prop ( name ) : handler_get , handler_set = None , None if name in props_get : handler_get = props_get [ name ] if name in props_set : handler_set = props_set [ name ] return ( name , handler_get , handler_set ) | retrieve a property handler |
6,322 | def get_queryset ( self ) : model_type = self . request . GET . get ( "type" ) pk = self . request . GET . get ( "id" ) content_type_model = ContentType . objects . get ( model = model_type . lower ( ) ) Model = content_type_model . model_class ( ) model_obj = Model . objects . filter ( id = pk ) . first ( ) return Comment . objects . filter_by_object ( model_obj ) | Parameters are already validated in the QuerySetPermission |
6,323 | def extractall ( archive , filename , dstdir ) : if zipfile . is_zipfile ( archive ) : z = zipfile . ZipFile ( archive ) for name in z . namelist ( ) : targetname = name if targetname . endswith ( '/' ) : targetname = targetname [ : - 1 ] if targetname . startswith ( os . path . sep ) : targetname = os . path . join ( dstdir , targetname [ 1 : ] ) else : targetname = os . path . join ( dstdir , targetname ) targetname = os . path . normpath ( targetname ) upperdirs = os . path . dirname ( targetname ) if upperdirs and not os . path . exists ( upperdirs ) : os . makedirs ( upperdirs ) if not name . endswith ( '/' ) : file ( targetname , 'wb' ) . write ( z . read ( name ) ) elif tarfile . is_tarfile ( archive ) : tar = tarfile . open ( archive ) tar . extractall ( path = dstdir ) else : shutil . copyfile ( archive , os . path . join ( dstdir , filename ) ) | extract zip or tar content to dstdir |
6,324 | def _merge_js ( input_file , input_dir , output_file ) : from studio . lib . buildjs import merge_js merge_js . main ( input_file , input_dir , output_file ) | Call into the merge_js module to merge the js files and minify the code . |
6,325 | def lcopt_bw2_setup ( ecospold_path , overwrite = False , db_name = None ) : default_ei_name = "Ecoinvent3_3_cutoff" if db_name is None : db_name = DEFAULT_PROJECT_STEM + default_ei_name if db_name in bw2 . projects : if overwrite : bw2 . projects . delete_project ( name = db_name , delete_dir = True ) else : print ( 'Looks like bw2 is already set up - if you want to overwrite the existing version run lcopt.utils.lcopt_bw2_setup in a python shell using overwrite = True' ) return False bw2 . projects . set_current ( db_name ) bw2 . bw2setup ( ) ei = bw2 . SingleOutputEcospold2Importer ( fix_mac_path_escapes ( ecospold_path ) , default_ei_name ) ei . apply_strategies ( ) ei . statistics ( ) ei . write_database ( ) return True | Utility function to set up brightway2 to work correctly with lcopt . |
6,326 | def lcopt_bw2_autosetup ( ei_username = None , ei_password = None , write_config = None , ecoinvent_version = '3.3' , ecoinvent_system_model = "cutoff" , overwrite = False ) : ei_name = "Ecoinvent{}_{}_{}" . format ( * ecoinvent_version . split ( '.' ) , ecoinvent_system_model ) config = check_for_config ( ) if config is None : config = DEFAULT_CONFIG with open ( storage . config_file , "w" ) as cfg : yaml . dump ( config , cfg , default_flow_style = False ) store_option = storage . project_type if store_option == 'single' : project_name = storage . single_project_name if bw2_project_exists ( project_name ) : bw2 . projects . set_current ( project_name ) if ei_name in bw2 . databases and overwrite == False : return True else : project_name = DEFAULT_PROJECT_STEM + ei_name if bw2_project_exists ( project_name ) : if overwrite : bw2 . projects . delete_project ( name = project_name , delete_dir = True ) auto_ecoinvent = partial ( eidl . get_ecoinvent , db_name = ei_name , auto_write = True , version = ecoinvent_version , system_model = ecoinvent_system_model ) if config is not None : if "ecoinvent" in config : if ei_username is None : ei_username = config [ 'ecoinvent' ] . get ( 'username' ) if ei_password is None : ei_password = config [ 'ecoinvent' ] . get ( 'password' ) write_config = False if ei_username is None : ei_username = input ( 'ecoinvent username: ' ) if ei_password is None : ei_password = getpass . getpass ( 'ecoinvent password: ' ) if write_config is None : write_config = input ( 'store username and password on this computer? y/[n]' ) in [ 'y' , 'Y' , 'yes' , 'YES' , 'Yes' ] if write_config : config [ 'ecoinvent' ] = { 'username' : ei_username , 'password' : ei_password } with open ( storage . config_file , "w" ) as cfg : yaml . dump ( config , cfg , default_flow_style = False ) if store_option == 'single' : if bw2_project_exists ( project_name ) : bw2 . projects . 
set_current ( project_name ) else : if not bw2_project_exists ( DEFAULT_BIOSPHERE_PROJECT ) : bw2 . projects . set_current ( project_name ) bw2 . bw2setup ( ) else : bw2 . projects . set_current ( DEFAULT_BIOSPHERE_PROJECT ) bw2 . create_core_migrations ( ) bw2 . projects . copy_project ( project_name , switch = True ) else : if not bw2_project_exists ( DEFAULT_BIOSPHERE_PROJECT ) : lcopt_biosphere_setup ( ) bw2 . projects . set_current ( DEFAULT_BIOSPHERE_PROJECT ) bw2 . create_core_migrations ( ) bw2 . projects . copy_project ( project_name , switch = True ) if ei_username is not None and ei_password is not None : auto_ecoinvent ( username = ei_username , password = ei_password ) else : auto_ecoinvent ( ) write_search_index ( project_name , ei_name , overwrite = overwrite ) return True | Utility function to automatically set up brightway2 to work correctly with lcopt . |
6,327 | def forwast_autodownload ( FORWAST_URL ) : dirpath = tempfile . mkdtemp ( ) r = requests . get ( FORWAST_URL ) z = zipfile . ZipFile ( io . BytesIO ( r . content ) ) z . extractall ( dirpath ) return os . path . join ( dirpath , 'forwast.bw2package' ) | Autodownloader for forwast database package for brightway . Used by lcopt_bw2_forwast_setup to get the database data . Not designed to be used on its own |
6,328 | def lcopt_bw2_forwast_setup ( use_autodownload = True , forwast_path = None , db_name = FORWAST_PROJECT_NAME , overwrite = False ) : if use_autodownload : forwast_filepath = forwast_autodownload ( FORWAST_URL ) elif forwast_path is not None : forwast_filepath = forwast_path else : raise ValueError ( 'Need a path if not using autodownload' ) if storage . project_type == 'single' : db_name = storage . single_project_name if bw2_project_exists ( db_name ) : bw2 . projects . set_current ( db_name ) else : bw2 . projects . set_current ( db_name ) bw2 . bw2setup ( ) else : if db_name in bw2 . projects : if overwrite : bw2 . projects . delete_project ( name = db_name , delete_dir = True ) else : print ( 'Looks like bw2 is already set up for the FORWAST database - if you want to overwrite the existing version run lcopt.utils.lcopt_bw2_forwast_setup in a python shell using overwrite = True' ) return False if not bw2_project_exists ( DEFAULT_BIOSPHERE_PROJECT ) : lcopt_biosphere_setup ( ) bw2 . projects . set_current ( DEFAULT_BIOSPHERE_PROJECT ) bw2 . create_core_migrations ( ) bw2 . projects . copy_project ( db_name , switch = True ) bw2 . BW2Package . import_file ( forwast_filepath ) return True | Utility function to set up brightway2 to work correctly with lcopt using the FORWAST database instead of ecoinvent |
6,329 | def _validate_samples_factors ( mwtabfile , validate_samples = True , validate_factors = True ) : from_subject_samples = { i [ "local_sample_id" ] for i in mwtabfile [ "SUBJECT_SAMPLE_FACTORS" ] [ "SUBJECT_SAMPLE_FACTORS" ] } from_subject_factors = { i [ "factors" ] for i in mwtabfile [ "SUBJECT_SAMPLE_FACTORS" ] [ "SUBJECT_SAMPLE_FACTORS" ] } if validate_samples : if "MS_METABOLITE_DATA" in mwtabfile : from_metabolite_data_samples = set ( mwtabfile [ "MS_METABOLITE_DATA" ] [ "MS_METABOLITE_DATA_START" ] [ "Samples" ] ) assert from_subject_samples == from_metabolite_data_samples if "NMR_BINNED_DATA" in mwtabfile : from_nmr_binned_data_samples = set ( mwtabfile [ "NMR_BINNED_DATA" ] [ "NMR_BINNED_DATA_START" ] [ "Fields" ] [ 1 : ] ) assert from_subject_samples == from_nmr_binned_data_samples if validate_factors : if "MS_METABOLITE_DATA" in mwtabfile : from_metabolite_data_factors = set ( mwtabfile [ "MS_METABOLITE_DATA" ] [ "MS_METABOLITE_DATA_START" ] [ "Factors" ] ) assert from_subject_factors == from_metabolite_data_factors | Validate Samples and Factors identifiers across the file . |
6,330 | def daemonize ( self ) : if self . userid : uid = pwd . getpwnam ( self . userid ) . pw_uid os . seteuid ( uid ) try : pid = os . fork ( ) if pid > 0 : sys . exit ( 0 ) except OSError as err : sys . stderr . write ( "First fork failed: {0} ({1})\n" . format ( err . errno , err . strerror ) ) sys . exit ( 1 ) os . chdir ( "/" ) os . setsid ( ) os . umask ( 0 ) try : pid = os . fork ( ) if pid > 0 : sys . exit ( 0 ) except OSError as err : sys . stderr . write ( "Second fork failed: {0} ({1})\n" . format ( err . errno , err . strerror ) ) sys . exit ( 1 ) sys . stdout . flush ( ) sys . stderr . flush ( ) si = open ( self . stdin , 'r' ) so = open ( self . stdout , 'w' ) se = open ( self . stderr , 'w' ) os . dup2 ( si . fileno ( ) , sys . stdin . fileno ( ) ) os . dup2 ( so . fileno ( ) , sys . stdout . fileno ( ) ) os . dup2 ( se . fileno ( ) , sys . stderr . fileno ( ) ) atexit . register ( self . delpid ) pid = str ( os . getpid ( ) ) open ( self . pidfile , 'w' ) . write ( "%s\n" % pid ) | Double - fork magic |
6,331 | def RenderJson ( self , pretty = False ) : steps = self . _steps topdict = { } topdict [ 'tropo' ] = steps if pretty : try : json = jsonlib . dumps ( topdict , indent = 4 , sort_keys = False ) except TypeError : json = jsonlib . dumps ( topdict ) else : json = jsonlib . dumps ( topdict ) return json | Render a Tropo object into a Json string . |
6,332 | def getIndexedValue ( self , index ) : actions = self . _actions if ( type ( actions ) is list ) : dict = actions [ index ] else : dict = actions return dict . get ( 'value' , 'NoValue' ) | Get the value of the indexed Tropo action . |
6,333 | def getNamedActionValue ( self , name ) : actions = self . _actions if ( type ( actions ) is list ) : for a in actions : if a . get ( 'name' , 'NoValue' ) == name : dict = a else : dict = actions return dict . get ( 'value' , 'NoValue' ) | Get the value of the named Tropo action . |
6,334 | def stop_subprocess ( pid ) : if hasattr ( os , "kill" ) : import signal os . kill ( pid , signal . SIGTERM ) else : import win32api pid = win32api . OpenProcess ( 1 , 0 , pid ) win32api . TerminateProcess ( pid , 0 ) os . waitpid ( pid , 0 ) | Stop subprocess whose process id is pid . |
6,335 | def file2abspath ( filename , this_file = __file__ ) : return os . path . abspath ( os . path . join ( os . path . dirname ( os . path . abspath ( this_file ) ) , filename ) ) | generate absolute path for the given file and base dir |
6,336 | def file2json ( filename , encoding = 'utf-8' ) : with codecs . open ( filename , "r" , encoding = encoding ) as f : return json . load ( f ) | save a line |
6,337 | def file2iter ( filename , encoding = 'utf-8' , comment_prefix = "#" , skip_empty_line = True ) : ret = list ( ) visited = set ( ) with codecs . open ( filename , encoding = encoding ) as f : for line in f : line = line . strip ( ) if skip_empty_line and len ( line ) == 0 : continue if comment_prefix and line . startswith ( comment_prefix ) : continue yield line | json stream parsing or line parsing |
6,338 | def json2file ( data , filename , encoding = 'utf-8' ) : with codecs . open ( filename , "w" , encoding = encoding ) as f : json . dump ( data , f , ensure_ascii = False , indent = 4 , sort_keys = True ) | write json in canonical json format |
6,339 | def lines2file ( lines , filename , encoding = 'utf-8' ) : with codecs . open ( filename , "w" , encoding = encoding ) as f : for line in lines : f . write ( line ) f . write ( "\n" ) | write json stream write lines too |
6,340 | def items2file ( items , filename , encoding = 'utf-8' , modifier = 'w' ) : with codecs . open ( filename , modifier , encoding = encoding ) as f : for item in items : f . write ( u"{}\n" . format ( json . dumps ( item , ensure_ascii = False , sort_keys = True ) ) ) | json array to file canonical json format |
6,341 | def convert ( schema ) : if isinstance ( schema , vol . Schema ) : schema = schema . schema if isinstance ( schema , Mapping ) : val = [ ] for key , value in schema . items ( ) : description = None if isinstance ( key , vol . Marker ) : pkey = key . schema description = key . description else : pkey = key pval = convert ( value ) pval [ 'name' ] = pkey if description is not None : pval [ 'description' ] = description if isinstance ( key , ( vol . Required , vol . Optional ) ) : pval [ key . __class__ . __name__ . lower ( ) ] = True if key . default is not vol . UNDEFINED : pval [ 'default' ] = key . default ( ) val . append ( pval ) return val if isinstance ( schema , vol . All ) : val = { } for validator in schema . validators : val . update ( convert ( validator ) ) return val if isinstance ( schema , ( vol . Clamp , vol . Range ) ) : val = { } if schema . min is not None : val [ 'valueMin' ] = schema . min if schema . max is not None : val [ 'valueMax' ] = schema . max return val if isinstance ( schema , vol . Length ) : val = { } if schema . min is not None : val [ 'lengthMin' ] = schema . min if schema . max is not None : val [ 'lengthMax' ] = schema . max return val if isinstance ( schema , vol . Datetime ) : return { 'type' : 'datetime' , 'format' : schema . format , } if isinstance ( schema , vol . In ) : if isinstance ( schema . container , Mapping ) : return { 'type' : 'select' , 'options' : list ( schema . container . items ( ) ) , } return { 'type' : 'select' , 'options' : [ ( item , item ) for item in schema . container ] } if schema in ( vol . Lower , vol . Upper , vol . Capitalize , vol . Title , vol . Strip ) : return { schema . __name__ . lower ( ) : True , } if isinstance ( schema , vol . Coerce ) : schema = schema . type if schema in TYPES_MAP : return { 'type' : TYPES_MAP [ schema ] } raise ValueError ( 'Unable to convert schema: {}' . format ( schema ) ) | Convert a voluptuous schema to a dictionary . |
6,342 | def version_cmp ( version_a , version_b ) : a = normalize_version ( version_a ) b = normalize_version ( version_b ) i_a = a [ 0 ] * 100 + a [ 1 ] * 10 + a [ 0 ] * 1 i_b = b [ 0 ] * 100 + b [ 1 ] * 10 + b [ 0 ] * 1 return i_a - i_b | Compares two versions |
6,343 | def getheader ( self , field , default = '' ) : if self . headers : for header in self . headers : if field . lower ( ) == header . lower ( ) : return self . headers [ header ] return default | Returns the HTTP response header field case insensitively |
6,344 | def isAlphanum ( c ) : return ( ( c >= 'a' and c <= 'z' ) or ( c >= '0' and c <= '9' ) or ( c >= 'A' and c <= 'Z' ) or c == '_' or c == '$' or c == '\\' or ( c is not None and ord ( c ) > 126 ) ) | return true if the character is a letter digit underscore dollar sign or non - ASCII character . |
6,345 | def _get ( self ) : c = self . theLookahead self . theLookahead = None if c == None : c = self . instream . read ( 1 ) if c >= ' ' or c == '\n' : return c if c == '' : return '\000' if c == '\r' : return '\n' return ' ' | return the next character from stdin . Watch out for lookahead . If the character is a control character translate it to a space or linefeed . |
6,346 | def _jsmin ( self ) : self . theA = '\n' self . _action ( 3 ) while self . theA != '\000' : if self . theA == ' ' : if isAlphanum ( self . theB ) : self . _action ( 1 ) else : self . _action ( 2 ) elif self . theA == '\n' : if self . theB in [ '{' , '[' , '(' , '+' , '-' ] : self . _action ( 1 ) elif self . theB == ' ' : self . _action ( 3 ) else : if isAlphanum ( self . theB ) : self . _action ( 1 ) else : self . _action ( 2 ) else : if self . theB == ' ' : if isAlphanum ( self . theA ) : self . _action ( 1 ) else : self . _action ( 3 ) elif self . theB == '\n' : if self . theA in [ '}' , ']' , ')' , '+' , '-' , '"' , '\'' ] : self . _action ( 1 ) else : if isAlphanum ( self . theA ) : self . _action ( 1 ) else : self . _action ( 3 ) else : self . _action ( 1 ) | Copy the input to the output deleting the characters which are insignificant to JavaScript . Comments will be removed . Tabs will be replaced with spaces . Carriage returns will be replaced with linefeeds . Most spaces and linefeeds will be removed . |
6,347 | def _get_lts_from_user ( self , user ) : req = meta . Session . query ( LayerTemplate ) . select_from ( join ( LayerTemplate , User ) ) return req . filter ( User . login == user ) . all ( ) | Get layertemplates owned by a user from the database . |
6,348 | def _get_lt_from_user_by_id ( self , user , lt_id ) : req = meta . Session . query ( LayerTemplate ) . select_from ( join ( LayerTemplate , User ) ) try : return req . filter ( and_ ( User . login == user , LayerTemplate . id == lt_id ) ) . one ( ) except Exception , e : return None | Get a layertemplate owned by a user from the database by lt_id . |
6,349 | def tokenizer ( text ) : stream = deque ( text . split ( "\n" ) ) while len ( stream ) > 0 : line = stream . popleft ( ) if line . startswith ( "#METABOLOMICS WORKBENCH" ) : yield KeyValue ( "#METABOLOMICS WORKBENCH" , "\n" ) yield KeyValue ( "HEADER" , line ) for identifier in line . split ( " " ) : if ":" in identifier : key , value = identifier . split ( ":" ) yield KeyValue ( key , value ) elif line . startswith ( "#ANALYSIS TYPE" ) : yield KeyValue ( "HEADER" , line ) elif line . startswith ( "#SUBJECT_SAMPLE_FACTORS:" ) : yield KeyValue ( "#ENDSECTION" , "\n" ) yield KeyValue ( "#SUBJECT_SAMPLE_FACTORS" , "\n" ) elif line . startswith ( "#" ) : yield KeyValue ( "#ENDSECTION" , "\n" ) yield KeyValue ( line . strip ( ) , "\n" ) elif line . startswith ( "SUBJECT_SAMPLE_FACTORS" ) : key , subject_type , local_sample_id , factors , additional_sample_data = line . split ( "\t" ) yield SubjectSampleFactors ( key . strip ( ) , subject_type , local_sample_id , factors , additional_sample_data ) elif line . endswith ( "_START" ) : yield KeyValue ( line , "\n" ) while not line . endswith ( "_END" ) : line = stream . popleft ( ) if line . endswith ( "_END" ) : yield KeyValue ( line . strip ( ) , "\n" ) else : data = line . split ( "\t" ) yield KeyValue ( data [ 0 ] , tuple ( data ) ) else : if line : if line . startswith ( "MS:MS_RESULTS_FILE" ) or line . startswith ( "NM:NMR_RESULTS_FILE" ) : try : key , value , extra = line . split ( "\t" ) extra_key , extra_value = extra . strip ( ) . split ( ":" ) yield KeyValueExtra ( key . strip ( ) [ 3 : ] , value , extra_key , extra_value ) except ValueError : key , value = line . split ( "\t" ) yield KeyValue ( key . strip ( ) [ 3 : ] , value ) else : try : key , value = line . split ( "\t" ) if ":" in key : if key . startswith ( "MS_METABOLITE_DATA:UNITS" ) : yield KeyValue ( key . strip ( ) , value ) else : yield KeyValue ( key . strip ( ) [ 3 : ] , value ) else : yield KeyValue ( key . 
strip ( ) , value ) except ValueError : print ( "LINE WITH ERROR:\n\t" , repr ( line ) ) raise yield KeyValue ( "#ENDSECTION" , "\n" ) yield KeyValue ( "!#ENDFILE" , "\n" ) | A lexical analyzer for the mwtab formatted files . |
6,350 | def _get_map_from_user_by_id ( self , user , map_id ) : req = Session . query ( Map ) . select_from ( join ( Map , User ) ) try : return req . filter ( and_ ( User . login == user , Map . id == map_id ) ) . one ( ) except Exception , e : return None | Get a mapfile owned by a user from the database by map_id . |
6,351 | def _get_maps_from_user ( self , user ) : req = Session . query ( Map ) . select_from ( join ( Map , User ) ) return req . filter ( User . login == user ) . all ( ) | Get mapfiles owned by a user from the database . |
6,352 | def _new_map_from_user ( self , user , name , filepath ) : map = Map ( name , filepath ) map . user = Session . query ( User ) . filter ( User . login == user ) . one ( ) Session . add ( map ) Session . commit ( ) return map | Create a new mapfile entry in database . |
6,353 | def _proxy ( self , url , urlparams = None ) : for k , v in request . params . iteritems ( ) : urlparams [ k ] = v query = urlencode ( urlparams ) full_url = url if query : if not full_url . endswith ( "?" ) : full_url += "?" full_url += query req = urllib2 . Request ( url = full_url ) for header in request . headers : if header . lower ( ) == "host" : req . add_header ( header , urlparse . urlparse ( url ) [ 1 ] ) else : req . add_header ( header , request . headers [ header ] ) res = urllib2 . urlopen ( req ) i = res . info ( ) response . status = res . code got_content_length = False for header in i : if header . lower ( ) == "transfer-encoding" : continue if header . lower ( ) == "content-length" : got_content_length = True response . headers [ header ] = i [ header ] result = res . read ( ) res . close ( ) return result | Do the actual action of proxying the call . |
6,354 | def open_file ( orig_file_path ) : unquoted = unquote ( orig_file_path ) paths = [ convert_to_platform_safe ( orig_file_path ) , "%s/index.html" % ( convert_to_platform_safe ( orig_file_path ) ) , orig_file_path , "%s/index.html" % orig_file_path , convert_to_platform_safe ( unquoted ) , "%s/index.html" % ( convert_to_platform_safe ( unquoted ) ) , unquoted , "%s/index.html" % unquoted , ] file_path = None handle = None for path in paths : try : file_path = path handle = open ( path , "rb" ) break except IOError : pass return handle | Taking in a file path attempt to open mock data files with it . |
6,355 | def attempt_open_query_permutations ( url , orig_file_path , is_header_file ) : directory = dirname ( convert_to_platform_safe ( orig_file_path ) ) + "/" try : filenames = [ f for f in os . listdir ( directory ) if isfile ( join ( directory , f ) ) ] except OSError : return if is_header_file : filenames = [ f for f in filenames if ".http-headers" in f ] filenames = [ f for f in filenames if _compare_file_name ( orig_file_path + ".http-headers" , directory , f ) ] else : filenames = [ f for f in filenames if ".http-headers" not in f ] filenames = [ f for f in filenames if _compare_file_name ( orig_file_path , directory , f ) ] url_parts = url . split ( "/" ) url_parts = url_parts [ len ( url_parts ) - 1 ] . split ( "?" ) base = url_parts [ 0 ] params = url_parts [ 1 ] params = params . split ( "&" ) filenames = [ f for f in filenames if f . startswith ( base ) ] params = [ convert_to_platform_safe ( unquote ( p ) ) for p in params ] for param in params : filenames = [ f for f in filenames if param in f ] if len ( filenames ) == 1 : path = join ( directory , filenames [ 0 ] ) return open_file ( path ) if len ( filenames ) > 1 : raise DataFailureException ( url , "Multiple mock data files matched the " + "parameters provided!" , 404 ) | Attempt to open a given mock data file with different permutations of the query parameters |
6,356 | def lookup ( self , value ) : for k , v in self . iteritems ( ) : if value == v : return k return None | return the first key in dict where value is name |
6,357 | def _getLPA ( self ) : " Provides line, pos and absPosition line as string " return str ( self . line ) + ":" + str ( self . pos ) + ":" + str ( self . absPosition ) | Provides line pos and absPosition line as string |
6,358 | def _onImport ( self , name , line , pos , absPosition ) : " Memorizes an import " if self . __lastImport is not None : self . imports . append ( self . __lastImport ) self . __lastImport = Import ( name , line , pos , absPosition ) return | Memorizes an import |
6,359 | def _onAs ( self , name ) : " Memorizes an alias for an import or an imported item " if self . __lastImport . what : self . __lastImport . what [ - 1 ] . alias = name else : self . __lastImport . alias = name return | Memorizes an alias for an import or an imported item |
6,360 | def comment_count ( obj ) : model_object = type ( obj ) . objects . get ( id = obj . id ) return model_object . comments . all ( ) . count ( ) | returns the count of comments of an object |
def profile_url(obj, profile_app_name, profile_model_name):
    """Returns the profile url of the comment's user, or '' on failure."""
    try:
        ctype = ContentType.objects.get(
            app_label=profile_app_name,
            model=profile_model_name.lower(),
        )
        profile = ctype.get_object_for_this_type(user=obj.user)
        return profile.get_absolute_url()
    except (ContentType.DoesNotExist, AttributeError):
        # unknown profile model, or settings / profile missing pieces
        return ""
def img_url(obj, profile_app_name, profile_model_name):
    """Returns the url of the profile image of a user.

    Resolves the profile model registered under the given app/model name,
    fetches the user's profile and returns the ``.url`` of the first field
    carrying an ``upload_to`` attribute (i.e. a file/image field).
    Returns '' when the profile model cannot be resolved or when no image
    field exists (the original implicitly returned None in that case).
    """
    try:
        content_type = ContentType.objects.get(
            app_label=profile_app_name,
            model=profile_model_name.lower(),
        )
    except (ContentType.DoesNotExist, AttributeError):
        return ""
    Profile = content_type.model_class()
    # reuse the already-resolved model class instead of resolving it twice
    profile = Profile.objects.get(user=obj.user)
    for field in Profile._meta.get_fields():
        if hasattr(field, "upload_to"):
            return field.value_from_object(profile).url
    # keep the failure value consistent with the except branches above
    return ""
def get_comments(obj, request, oauth=False, paginate=False, cpp=10):
    """Retrieves the comments related to an object and builds the template
    context needed to render them.

    Args:
        obj: the model instance whose comments are listed.
        request: the current request (provides the user and the ?page= arg).
        oauth: passed through unchanged to the template context.
        paginate: when True, paginate with `cpp` comments per page.
        cpp: comments per page.

    Returns:
        dict: template context with the comment form, comments and settings.
    """
    model_object = type(obj).objects.get(id=obj.id)
    comments = Comment.objects.filter_by_object(model_object)
    # (the original also computed comments.count() into an unused local;
    # dropped to avoid the extra query)
    if paginate:
        paginator = Paginator(comments, cpp)
        page = request.GET.get('page')
        try:
            comments = paginator.page(page)
        except PageNotAnInteger:
            # non-numeric page -> first page
            comments = paginator.page(1)
        except EmptyPage:
            # out-of-range page -> last page
            comments = paginator.page(paginator.num_pages)
    # the profile settings are optional
    try:
        profile_app_name = settings.PROFILE_APP_NAME
        profile_model_name = settings.PROFILE_MODEL_NAME
    except AttributeError:
        profile_app_name = None
        profile_model_name = None
    # normalise LOGIN_URL to an absolute path; default to '' when unset
    try:
        if settings.LOGIN_URL.startswith("/"):
            login_url = settings.LOGIN_URL
        else:
            login_url = "/" + settings.LOGIN_URL
    except AttributeError:
        login_url = ""
    return {
        "commentform": CommentForm(),
        "model_object": obj,
        "user": request.user,
        "comments": comments,
        "oauth": oauth,
        "profile_app_name": profile_app_name,
        "profile_model_name": profile_model_name,
        "paginate": paginate,
        "login_url": login_url,
        "cpp": cpp
    }
def save(self):
    """Save the instance as a .lcopt file (a pickle of the whole model)."""
    filename = '{}.lcopt'.format(self.name)
    if self.save_option == 'curdir':
        base_dir = os.getcwd()
    else:
        base_dir = storage.model_dir
    model_path = fix_mac_path_escapes(os.path.join(base_dir, filename))
    with open(model_path, 'wb') as model_file:
        pickle.dump(self, model_file)
def load(self, filename):
    """Load data from a saved .lcopt file into this instance."""
    # make sure the filename carries the .lcopt extension
    if filename[-6:] != ".lcopt":
        filename += ".lcopt"
    # try the path as given, then fall back to the model storage directory
    try:
        savedInstance = pickle.load(open("{}".format(filename), "rb"))
    except FileNotFoundError:
        savedInstance = pickle.load(open(fix_mac_path_escapes(os.path.join(storage.model_dir, "{}".format(filename))), "rb"))
    # every attribute that should be copied over when present in the file
    attributes = ['name', 'database', 'params', 'production_params', 'allocation_params', 'ext_params', 'matrix', 'names', 'parameter_sets', 'model_matrices', 'technosphere_matrices', 'leontif_matrices', 'external_databases', 'parameter_map', 'sandbox_positions', 'ecoinventName', 'biosphereName', 'forwastName', 'analysis_settings', 'technosphere_databases', 'biosphere_databases', 'result_set', 'evaluated_parameter_sets', 'useForwast', 'base_project_name', 'save_option', 'allow_allocation', 'ecoinvent_version', 'ecoinvent_system_model', ]
    for attr in attributes:
        if hasattr(savedInstance, attr):
            setattr(self, attr, getattr(savedInstance, attr))
        else:
            # older files may not define every attribute - skip silently
            pass
    # legacy files predate the save_option attribute
    if not hasattr(savedInstance, 'save_option'):
        setattr(self, 'save_option', LEGACY_SAVE_OPTION)
    # legacy files also lack explicit ecoinvent version/system-model info:
    # recover both from the ecoinvent database name - presumably formatted
    # like "...3_3_cutoff" (major, minor, system model) - TODO confirm
    if not hasattr(savedInstance, 'ecoinvent_version') or not hasattr(savedInstance, 'ecoinvent_system_model'):
        parts = savedInstance.ecoinventName.split("_")
        main_version = parts[0][-1]
        sub_version = parts[1]
        system_model = parts[2]
        setattr(self, 'ecoinvent_version', '{}.{}'.format(main_version, sub_version))
        setattr(self, 'ecoinvent_system_model', system_model)
def create_product(self, name, location='GLO', unit='kg', **kwargs):
    """Create a new product in the model database.

    Returns the created exchange, or False when an item with the same
    code already exists.
    """
    product = item_factory(name=name, location=location, unit=unit, type='product', **kwargs)
    if self.exists_in_database(product['code']):
        return False
    self.add_to_database(product)
    return self.get_exchange(name)
def unlink_intermediate(self, sourceId, targetId):
    """Remove a link between two processes."""
    db_name = self.database.get('name')
    items = self.database['items']
    source = items[(db_name, sourceId)]
    target = items[(db_name, targetId)]
    # the exchange produced by the source process identifies the link
    production_exchange = [x['input'] for x in source['exchanges'] if x['type'] == 'production'][0]
    # drop every target exchange fed by that production exchange
    target['exchanges'] = [x for x in target['exchanges'] if x['input'] != production_exchange]
    self.parameter_scan()
    return True
def generate_parameter_set_excel_file(self):
    """Generate an excel file containing the parameter sets in a format you
    can import into SimaPro Developer.

    Returns the full path of the written .xlsx file.
    """
    parameter_sets = self.parameter_sets
    p_set = []
    filename = "ParameterSet_{}_input_file.xlsx".format(self.name)
    # choose the output directory based on the model's save option
    if self.save_option == 'curdir':
        base_dir = os.getcwd()
    else:
        base_dir = os.path.join(storage.simapro_dir, self.name.replace(" ", "_"))
        if not os.path.isdir(base_dir):
            os.mkdir(base_dir)
    p_set_name = os.path.join(base_dir, filename)
    p = self.params
    # one row per non-computed internal parameter ...
    for k in p.keys():
        if p[k]['function'] is None:
            base_dict = {'id': k, 'name': p[k]['description'], 'unit': p[k]['unit']}
            # one column per parameter set
            for s in parameter_sets.keys():
                base_dict[s] = parameter_sets[s][k]
            p_set.append(base_dict)
        else:
            # computed parameters are derived, not entered - skip them
            pass
    # ... plus one row per external (global) parameter
    for e in self.ext_params:
        base_dict = {'id': '{}'.format(e['name']), 'type': 'external', 'name': e['description'], 'unit': ''}
        for s in parameter_sets.keys():
            base_dict[s] = parameter_sets[s][e['name']]
        p_set.append(base_dict)
    df = pd.DataFrame(p_set)
    # write with a fixed column order: name, unit, id, then one column per set
    with pd.ExcelWriter(p_set_name, engine='xlsxwriter') as writer:
        ps_columns = [k for k in parameter_sets.keys()]
        my_columns = ['name', 'unit', 'id']
        my_columns.extend(ps_columns)
        df.to_excel(writer, sheet_name=self.name, columns=my_columns, index=False, merge_cells=False)
    return p_set_name
def add_parameter(self, param_name, description=None, default=0, unit=None):
    """Add a global parameter to the database that can be accessed by functions."""
    if description is None:
        description = "Parameter called {}".format(param_name)
    if unit is None:
        unit = "-"
    # names must be unique among the external parameters
    already_defined = any(p['name'] == param_name for p in self.ext_params)
    if already_defined:
        print('{} already exists - choose a different name'.format(param_name))
    else:
        self.ext_params.append({'name': param_name, 'description': description, 'default': default, 'unit': unit})
def list_parameters_as_df(self):
    """Return a DataFrame listing all parameters of the model.

    Only really useful when running from a jupyter notebook.

    External (global) parameters come first with 'n/a' coords/function,
    followed by the internal matrix parameters.
    """
    rows = []
    # external parameters have no coordinates and no function
    # (the original enumerated them but never used the index)
    for e in self.ext_params:
        rows.append({'id': e['name'], 'coords': "n/a", 'description': e['description'], 'function': "n/a"})
    # internal parameters keep their matrix coordinates and function
    for pk, p in self.params.items():
        rows.append({'id': pk, 'coords': p['coords'], 'description': p['description'], 'function': p['function']})
    return pd.DataFrame(rows)
def import_external_db(self, db_file, db_type=None):
    """Import an external database for use in lcopt.

    Args:
        db_file: path of the pickled database, without the .pickle suffix.
        db_type: 'technosphere' (default) or 'biosphere'.

    Raises:
        ValueError: when db_type is neither 'technosphere' nor 'biosphere'.
    """
    if db_type is None:
        db_type = 'technosphere'
    # validate before touching any state; the original appended to
    # external_databases first and then executed `raise Exception` followed
    # by an unreachable print() of the error message
    if db_type not in ('technosphere', 'biosphere'):
        raise ValueError("Database type must be 'technosphere' or 'biosphere'")
    # the original leaked the file handle via pickle.load(open(...))
    with open("{}.pickle".format(db_file), "rb") as f:
        db = pickle.load(f)
    # all keys share the same database name in their first element
    name = list(db.keys())[0][0]
    self.external_databases.append({'items': db, 'name': name})
    if db_type == 'technosphere':
        self.technosphere_databases.append(name)
    else:
        self.biosphere_databases.append(name)
def search_databases(self, search_term, location=None, markets_only=False, databases_to_search=None, allow_internal=False):
    """Search external databases linked to your lcopt model.

    Optionally restricts results to a location, to market processes only,
    to a subset of databases, and/or includes the model's own intermediate
    items.
    """
    dict_list = []
    if allow_internal:
        # expose the model's own intermediate items as a searchable dict
        internal_dict = {
            key: value
            for key, value in self.database['items'].items()
            if value.get('lcopt_type') == 'intermediate'
        }
        dict_list.append(internal_dict)
    if databases_to_search is None:
        # search everything linked to the model
        dict_list += [x['items'] for x in self.external_databases]
    else:
        dict_list += [x['items'] for x in self.external_databases if x['name'] in databases_to_search]
    data = Dictionaries(*dict_list)
    query = Query()
    if markets_only:
        query.add(Filter("name", "has", "market for"))
    if location is not None:
        query.add(Filter("location", "is", location))
    query.add(Filter("name", "ihas", search_term))
    return query(data)
def export_to_bw2(self):
    """Export the lcopt model in the native brightway 2 format.

    Returns a (name, database) tuple as produced by Bw2Exporter.
    """
    exporter = Bw2Exporter(self)
    return exporter.export_to_bw2()
def analyse(self, demand_item, demand_item_code):
    """Run the analysis of the model.

    Doesn't return the results directly; they are stored on
    ``self.result_set``. Returns True on completion.
    """
    analysis = Bw2Analysis(self)
    self.result_set = analysis.run_analyses(demand_item, demand_item_code, **self.analysis_settings)
    return True
def locate(name, **kwargs):
    """Show resolve information about the specified service."""
    context = Context(**kwargs)
    context.execute_action('locate', name=name, locator=context.locator)
def routing(name, **kwargs):
    """Show information about the requested routing group."""
    context = Context(**kwargs)
    context.execute_action('routing', name=name, locator=context.locator)
def cluster(resolve, **kwargs):
    """Show cluster info."""
    context = Context(**kwargs)
    context.execute_action('cluster', locator=context.locator, resolve=resolve)
def info(name, m, p, b, w, **kwargs):
    """Show information about cocaine runtime.

    The `m` and `p` options are packed into a bitmask together with an
    always-set low bit; `b` (brief) forces an empty mask.
    """
    if b:
        flags = 0b000
    else:
        # bit 1 mirrors `m`, bit 2 mirrors `p`, bit 0 is always set
        flags = ((m << 1) & 0b010) | ((p << 2) & 0b100) | 0b001
    context = Context(**kwargs)
    context.execute_action(
        'info',
        node=context.repo.create_secure_service('node'),
        locator=context.locator,
        name=name,
        flags=flags,
        use_wildcard=w,
        timeout=context.timeout,
    )
def metrics(ty, query, query_type, **kwargs):
    """Outputs runtime metrics collected from cocaine-runtime and its services."""
    context = Context(**kwargs)
    context.execute_action(
        'metrics',
        metrics=context.repo.create_secure_service('metrics'),
        ty=ty,
        query=query,
        query_type=query_type,
    )
def app_list(**kwargs):
    """Show uploaded applications."""
    context = Context(**kwargs)
    context.execute_action('app:list', storage=context.repo.create_secure_service('storage'))
def app_view(name, **kwargs):
    """Show manifest content for an application."""
    context = Context(**kwargs)
    context.execute_action(
        'app:view',
        storage=context.repo.create_secure_service('storage'),
        name=name,
    )
def app_import(path, name, manifest, container_url, docker_address, registry, **kwargs):
    """Import application Docker container.

    Both `container_url` and `docker_address` are required; a ValueError is
    raised when either is empty. The timeout is raised to at least 120s
    because container imports can be slow.
    """
    lower_limit = 120.0
    context = Context(**kwargs)
    if context.timeout < lower_limit:
        context.timeout = lower_limit
        log.info('shifted timeout to the %.2fs', context.timeout)
    if not (container_url and docker_address):
        raise ValueError("both `container_url` and `docker_address` options must not be empty")
    context.execute_action(
        'app:import-docker',
        storage=context.repo.create_secure_service('storage'),
        path=path,
        name=name,
        manifest=manifest,
        container=container_url,
        address=docker_address,
        registry=registry,
    )
def app_remove(name, **kwargs):
    """Remove application from storage."""
    context = Context(**kwargs)
    context.execute_action(
        'app:remove',
        storage=context.repo.create_secure_service('storage'),
        name=name,
    )
def app_start(name, profile, **kwargs):
    """Start an application with specified profile."""
    context = Context(**kwargs)
    context.execute_action(
        'app:start',
        node=context.repo.create_secure_service('node'),
        name=name,
        profile=profile,
    )
def app_restart(name, profile, **kwargs):
    """Restart application."""
    context = Context(**kwargs)
    context.execute_action(
        'app:restart',
        node=context.repo.create_secure_service('node'),
        locator=context.locator,
        name=name,
        profile=profile,
    )
def check(name, **kwargs):
    """Check application status."""
    context = Context(**kwargs)
    context.execute_action(
        'app:check',
        node=context.repo.create_secure_service('node'),
        name=name,
    )
def profile_list(**kwargs):
    """Show uploaded profiles."""
    context = Context(**kwargs)
    context.execute_action('profile:list', storage=context.repo.create_secure_service('storage'))
def profile_view(name, **kwargs):
    """Show profile configuration content."""
    context = Context(**kwargs)
    context.execute_action(
        'profile:view',
        storage=context.repo.create_secure_service('storage'),
        name=name,
    )
def profile_remove(name, **kwargs):
    """Remove profile from the storage."""
    context = Context(**kwargs)
    context.execute_action(
        'profile:remove',
        storage=context.repo.create_secure_service('storage'),
        name=name,
    )
def runlist_list(**kwargs):
    """Show uploaded runlists."""
    context = Context(**kwargs)
    context.execute_action('runlist:list', storage=context.repo.create_secure_service('storage'))
def runlist_view(name, **kwargs):
    """Show configuration content for a specified runlist."""
    context = Context(**kwargs)
    context.execute_action(
        'runlist:view',
        storage=context.repo.create_secure_service('storage'),
        name=name,
    )
def runlist_upload(name, runlist, **kwargs):
    """Upload runlist with context into the storage."""
    context = Context(**kwargs)
    context.execute_action(
        'runlist:upload',
        storage=context.repo.create_secure_service('storage'),
        name=name,
        runlist=runlist,
    )
def runlist_create(name, **kwargs):
    """Create runlist and upload it into the storage."""
    context = Context(**kwargs)
    context.execute_action(
        'runlist:create',
        storage=context.repo.create_secure_service('storage'),
        name=name,
    )
def runlist_remove(name, **kwargs):
    """Remove runlist from the storage."""
    context = Context(**kwargs)
    context.execute_action(
        'runlist:remove',
        storage=context.repo.create_secure_service('storage'),
        name=name,
    )
def runlist_add_app(name, app, profile, force, **kwargs):
    """Add specified application with profile to the specified runlist."""
    context = Context(**kwargs)
    context.execute_action(
        'runlist:add-app',
        storage=context.repo.create_secure_service('storage'),
        name=name,
        app=app,
        profile=profile,
        force=force,
    )
def crashlog_status(**kwargs):
    """Show crashlogs status."""
    context = Context(**kwargs)
    context.execute_action('crashlog:status', storage=context.repo.create_secure_service('storage'))
def crashlog_list(name, day, **kwargs):
    """Show crashlogs list for application."""
    context = Context(**kwargs)
    context.execute_action(
        'crashlog:list',
        storage=context.repo.create_secure_service('storage'),
        name=name,
        day_string=day,
    )
def crashlog_view(name, timestamp, **kwargs):
    """Show crashlog for application with specified timestamp."""
    context = Context(**kwargs)
    context.execute_action(
        'crashlog:view',
        storage=context.repo.create_secure_service('storage'),
        name=name,
        timestamp=timestamp,
    )
def crashlog_removeall(name, **kwargs):
    """Remove all crashlogs for application from the storage."""
    context = Context(**kwargs)
    context.execute_action(
        'crashlog:removeall',
        storage=context.repo.create_secure_service('storage'),
        name=name,
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.