| idx (int64, 0-63k) | question (string, length 53-5.28k) | target (string, length 5-805) |
|---|---|---|
2,700
|
def get_action_cache_key ( name , argument ) : tokens = [ str ( name ) ] if argument : tokens . append ( str ( argument ) ) return '::' . join ( tokens )
|
Get an action cache key string .
|
2,701
|
def removed_or_inserted_action ( mapper , connection , target ) : current_access . delete_action_cache ( get_action_cache_key ( target . action , target . argument ) )
|
Remove the action from cache when an item is inserted or deleted .
|
2,702
|
def changed_action ( mapper , connection , target ) : action_history = get_history ( target , 'action' ) argument_history = get_history ( target , 'argument' ) owner_history = get_history ( target , 'user' if isinstance ( target , ActionUsers ) else 'role' if isinstance ( target , ActionRoles ) else 'role_name' ) if action_history . has_changes ( ) or argument_history . has_changes ( ) or owner_history . has_changes ( ) : current_access . delete_action_cache ( get_action_cache_key ( target . action , target . argument ) ) current_access . delete_action_cache ( get_action_cache_key ( action_history . deleted [ 0 ] if action_history . deleted else target . action , argument_history . deleted [ 0 ] if argument_history . deleted else target . argument ) )
|
Remove the action from cache when an item is updated .
|
2,703
|
def allow ( cls , action , ** kwargs ) : return cls . create ( action , exclude = False , ** kwargs )
|
Allow the given action need .
|
2,704
|
def deny ( cls , action , ** kwargs ) : return cls . create ( action , exclude = True , ** kwargs )
|
Deny the given action need .
|
2,705
|
def query_by_action ( cls , action , argument = None ) : query = cls . query . filter_by ( action = action . value ) argument = argument or getattr ( action , 'argument' , None ) if argument is not None : query = query . filter ( db . or_ ( cls . argument == str ( argument ) , cls . argument . is_ ( None ) , ) ) else : query = query . filter ( cls . argument . is_ ( None ) ) return query
|
Prepare query object with filtered action .
|
2,706
|
def predict_mhci_binding ( job , peptfile , allele , peplen , univ_options , mhci_options ) : work_dir = os . getcwd ( ) input_files = { 'peptfile.faa' : peptfile } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) peptides = read_peptide_file ( os . path . join ( os . getcwd ( ) , 'peptfile.faa' ) ) if not peptides : return job . fileStore . writeGlobalFile ( job . fileStore . getLocalTempFile ( ) ) parameters = [ mhci_options [ 'pred' ] , allele , peplen , input_files [ 'peptfile.faa' ] ] with open ( '/' . join ( [ work_dir , 'predictions.tsv' ] ) , 'w' ) as predfile : docker_call ( tool = 'mhci' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = predfile , interactive = True , tool_version = mhci_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( predfile . name ) job . fileStore . logToMaster ( 'Ran mhci on %s:%s:%s successfully' % ( univ_options [ 'patient' ] , allele , peplen ) ) return output_file
|
Predict binding for each peptide in peptfile to allele using the IEDB mhci binding prediction tool .
|
2,707
|
def iter_and_close ( file_like , block_size ) : while 1 : try : block = file_like . read ( block_size ) if block : yield block else : raise StopIteration except StopIteration : file_like . close ( ) return
|
Yield file contents by block then close the file .
|
2,708
|
def cling_wrap ( package_name , dir_name , ** kw ) : resource = Requirement . parse ( package_name ) return Cling ( resource_filename ( resource , dir_name ) , ** kw )
|
Return a Cling that serves from the given package and dir_name .
|
2,709
|
def _is_under_root ( self , full_path ) : if ( path . abspath ( full_path ) + path . sep ) . startswith ( path . abspath ( self . root ) + path . sep ) : return True else : return False
|
Guard against arbitrary file retrieval .
|
2,710
|
def _match_magic ( self , full_path ) : for magic in self . magics : if magic . matches ( full_path ) : return magic
|
Return the first magic that matches this path or None .
|
2,711
|
def _full_path ( self , path_info ) : full_path = self . root + path_info if path . exists ( full_path ) : return full_path else : for magic in self . magics : if path . exists ( magic . new_path ( full_path ) ) : return magic . new_path ( full_path ) else : return full_path
|
Return the full path from which to read .
|
2,712
|
def _guess_type ( self , full_path ) : magic = self . _match_magic ( full_path ) if magic is not None : return ( mimetypes . guess_type ( magic . old_path ( full_path ) ) [ 0 ] or 'text/plain' ) else : return mimetypes . guess_type ( full_path ) [ 0 ] or 'text/plain'
|
Guess the mime type magically or using the mimetypes module .
|
2,713
|
def _conditions ( self , full_path , environ ) : magic = self . _match_magic ( full_path ) if magic is not None : return magic . conditions ( full_path , environ ) else : mtime = stat ( full_path ) . st_mtime return str ( mtime ) , rfc822 . formatdate ( mtime )
|
Return ETag and Last-Modified values; defaults to now for both.
|
2,714
|
def _file_like ( self , full_path ) : magic = self . _match_magic ( full_path ) if magic is not None : return magic . file_like ( full_path , self . encoding ) else : return open ( full_path , 'rb' )
|
Return the appropriate file object .
|
2,715
|
def old_path ( self , full_path ) : if self . matches ( full_path ) : return full_path [ : - len ( self . extension ) ] else : raise MagicError ( "Path does not match this magic." )
|
Remove self . extension from path or raise MagicError .
|
2,716
|
def body ( self , environ , file_like ) : variables = environ . copy ( ) variables . update ( self . variables ) template = string . Template ( file_like . read ( ) ) if self . safe is True : return [ template . safe_substitute ( variables ) ] else : return [ template . substitute ( variables ) ]
|
Pass environ and self.variables into the template.
|
2,717
|
def get_rate_for ( self , currency : str , to : str , reverse : bool = False ) -> Number : if currency . upper ( ) == to . upper ( ) : return self . _format_number ( '1.0' ) base , quote = currency , to if reverse : base , quote = to , currency try : rate = self . _get_rate ( base , quote ) except Exception as e : raise ConverterRateError ( self . name ) from e rate = self . _format_number ( rate ) try : assert isinstance ( rate , ( float , Decimal ) ) assert rate > 0 except AssertionError as e : raise ConverterValidationError ( self . name , rate ) from e if reverse : return self . _format_number ( '1.0' ) / rate return rate
|
Get current market rate for currency
|
2,718
|
def convert ( self , amount : Number , currency : str , to : str , reverse : bool = False ) -> Number : rate = self . get_rate_for ( currency , to , reverse ) if self . return_decimal : amount = Decimal ( amount ) return amount * rate
|
Convert amount to another currency
|
2,719
|
def convert_money ( self , money : Money , to : str , reverse : bool = False ) -> Money : converted = self . convert ( money . amount , money . currency , to , reverse ) return Money ( converted , to )
|
Convert money to another currency
|
2,720
|
def add_icon_widget ( self , ref , x = 1 , y = 1 , name = "heart" ) : if ref not in self . widgets : widget = IconWidget ( screen = self , ref = ref , x = x , y = y , name = name ) self . widgets [ ref ] = widget return self . widgets [ ref ]
|
Add Icon Widget
|
2,721
|
def add_scroller_widget ( self , ref , left = 1 , top = 1 , right = 20 , bottom = 1 , direction = "h" , speed = 1 , text = "Message" ) : if ref not in self . widgets : widget = ScrollerWidget ( screen = self , ref = ref , left = left , top = top , right = right , bottom = bottom , direction = direction , speed = speed , text = text ) self . widgets [ ref ] = widget return self . widgets [ ref ]
|
Add Scroller Widget
|
2,722
|
def install_dir ( self ) : max_len = 500 directory = self . _get_str ( self . _iface . get_install_dir , [ self . app_id ] , max_len = max_len ) if not directory : directory = self . _get_str ( self . _iface_list . get_install_dir , [ self . app_id ] , max_len = max_len ) return directory
|
Returns application installation path .
|
2,723
|
def purchase_time ( self ) : ts = self . _iface . get_purchase_time ( self . app_id ) return datetime . utcfromtimestamp ( ts )
|
Date and time of app purchase .
|
2,724
|
def get_args ( self ) : parser = argparse . ArgumentParser ( description = self . _desc , formatter_class = MyUniversalHelpFormatter ) if self . _no_clean : parser . add_argument ( '--no-clean' , action = 'store_true' , help = 'If this flag is used, temporary work directory is not ' 'cleaned.' ) if self . _resume : parser . add_argument ( '--resume' , action = 'store_true' , help = 'If this flag is used, a previously uncleaned workflow in the' ' same directory will be resumed' ) return parser
|
Use this context manager to add arguments to an argparse object with the add_argument method. Arguments must be defined before the command is defined. Note that --no-clean and --resume are added upon exit and should not be added in the context manager. For more info about these default arguments see below.
|
2,725
|
def wrap_rankboost ( job , rsem_files , merged_mhc_calls , transgene_out , univ_options , rankboost_options ) : rankboost = job . addChildJobFn ( boost_ranks , rsem_files [ 'rsem.isoforms.results' ] , merged_mhc_calls , transgene_out , univ_options , rankboost_options ) return rankboost . rv ( )
|
A wrapper for boost_ranks .
|
2,726
|
def _path_from_module ( module ) : paths = list ( getattr ( module , '__path__' , [ ] ) ) if len ( paths ) != 1 : filename = getattr ( module , '__file__' , None ) if filename is not None : paths = [ os . path . dirname ( filename ) ] else : paths = list ( set ( paths ) ) if len ( paths ) > 1 : raise ImproperlyConfigured ( "The bot module %r has multiple filesystem locations (%r); " "you must configure this bot with an AppConfig subclass " "with a 'path' class attribute." % ( module , paths ) ) elif not paths : raise ImproperlyConfigured ( "The bot module %r has no filesystem location, " "you must configure this bot with an AppConfig subclass " "with a 'path' class attribute." % ( module , ) ) return paths [ 0 ]
|
Attempt to determine the bot's filesystem path from its module.
|
2,727
|
def create ( cls , entry ) : try : module = import_module ( entry ) except ImportError : module = None mod_path , _ , cls_name = entry . rpartition ( '.' ) if not mod_path : raise else : try : entry = module . default_bot except AttributeError : return cls ( f'{entry}.Bot' , module ) else : mod_path , _ , cls_name = entry . rpartition ( '.' ) mod = import_module ( mod_path ) try : bot_cls = getattr ( mod , cls_name ) except AttributeError : if module is None : import_module ( entry ) raise if not issubclass ( bot_cls , Bot ) : raise ImproperlyConfigured ( "'%s' isn't a subclass of Bot." % entry ) return cls ( entry , mod , bot_cls . label )
|
Factory that creates a bot config from an entry in INSTALLED_APPS.
|
2,728
|
def get_config ( self , config_name , require_ready = True ) : if require_ready : self . bots . check_configs_ready ( ) else : self . bots . check_bots_ready ( ) return self . configs . get ( config_name . lower ( ) , { } )
|
Return the config with the given case-insensitive config_name. Raise LookupError if no config exists with this name.
|
2,729
|
def get_configs ( self ) : self . bots . check_models_ready ( ) for config in self . configs . values ( ) : yield config
|
Return an iterable of models .
|
2,730
|
def populate ( self , installed_bots = None ) : if self . ready : return with self . _lock : if self . ready : return if self . loading : raise RuntimeError ( "populate() isn't re-entrant" ) self . loading = True for entry in installed_bots or { } : if isinstance ( entry , Bot ) : cls = entry entry = '.' . join ( [ cls . __module__ , cls . __name__ ] ) bot_reg = BotRegistry . create ( entry ) if bot_reg . label in self . bots : raise ImproperlyConfigured ( "Bot labels aren't unique, " "duplicates: %s" % bot_reg . label ) self . bots [ bot_reg . label ] = bot_reg bot_reg . bots = self counts = Counter ( bot_reg . name for bot_reg in self . bots . values ( ) ) duplicates = [ name for name , count in counts . most_common ( ) if count > 1 ] if duplicates : raise ImproperlyConfigured ( "Bot names aren't unique, " "duplicates: %s" % ", " . join ( duplicates ) ) self . bots_ready = True for bot in self . bots . values ( ) : bot . import_configs ( ) self . configs_ready = True self . ready = True
|
Load bots. Import each bot module. It is thread-safe and idempotent, but not re-entrant.
|
2,731
|
def get_bot ( self , bot_label ) : self . check_bots_ready ( ) try : return self . bots [ bot_label ] except KeyError : message = "No installed bot with label '%s'." % bot_label for bot_cls in self . get_bots ( ) : if bot_cls . name == bot_label : message += " Did you mean '%s'?" % bot_cls . label break raise LookupError ( message )
|
Import all bots and return a bot class for the given label. Raise LookupError if no bot exists with this label.
|
2,732
|
def get_configs ( self ) : self . check_configs_ready ( ) result = [ ] for bot in self . bots . values ( ) : result . extend ( list ( bot . get_models ( ) ) ) return result
|
Return a list of all installed configs .
|
2,733
|
def get_config ( self , bot_label , config_name = None , require_ready = True ) : if require_ready : self . check_configs_ready ( ) else : self . check_bots_ready ( ) if config_name is None : config_name = defaults . BOT_CONFIG bot = self . get_bot ( bot_label ) if not require_ready and bot . configs is None : bot . import_configs ( ) return bot . get_config ( config_name , require_ready = require_ready )
|
Return the config matching the given bot_label and config_name. config_name is case-insensitive. Raise LookupError if no bot exists with this label or no config exists with this name in the bot. Raise ValueError if called with a single argument that doesn't contain exactly one dot.
|
2,734
|
def __to_float ( val , digits ) : try : return round ( float ( val ) , digits ) except ( ValueError , TypeError ) : return float ( 0 )
|
Convert val into a float rounded to digits decimal places.
|
2,735
|
def get_json_data ( latitude = 52.091579 , longitude = 5.119734 ) : final_result = { SUCCESS : False , MESSAGE : None , CONTENT : None , RAINCONTENT : None } log . info ( "Getting buienradar json data for latitude=%s, longitude=%s" , latitude , longitude ) result = __get_ws_data ( ) if result [ SUCCESS ] : final_result [ CONTENT ] = result [ CONTENT ] final_result [ SUCCESS ] = True else : if STATUS_CODE in result and MESSAGE in result : msg = "Status: %d, Msg: %s" % ( result [ STATUS_CODE ] , result [ MESSAGE ] ) elif MESSAGE in result : msg = "Msg: %s" % ( result [ MESSAGE ] ) else : msg = "Something went wrong (reason unknown)." log . warning ( msg ) final_result [ MESSAGE ] = msg result = __get_precipfc_data ( latitude , longitude ) if result [ SUCCESS ] : final_result [ RAINCONTENT ] = result [ CONTENT ] else : if STATUS_CODE in result and MESSAGE in result : msg = "Status: %d, Msg: %s" % ( result [ STATUS_CODE ] , result [ MESSAGE ] ) elif MESSAGE in result : msg = "Msg: %s" % ( result [ MESSAGE ] ) else : msg = "Something went wrong (reason unknown)." log . warning ( msg ) final_result [ MESSAGE ] = msg return final_result
|
Get buienradar json data and return results .
|
2,736
|
def __get_precipfc_data ( latitude , longitude ) : url = 'https://gpsgadget.buienradar.nl/data/raintext?lat={}&lon={}' url = url . format ( round ( latitude , 2 ) , round ( longitude , 2 ) ) result = __get_url ( url ) return result
|
Get buienradar forecasted precipitation .
|
2,737
|
def __get_url ( url ) : log . info ( "Retrieving weather data (%s)..." , url ) result = { SUCCESS : False , MESSAGE : None } try : r = requests . get ( url ) result [ STATUS_CODE ] = r . status_code result [ HEADERS ] = r . headers result [ CONTENT ] = r . text if ( 200 == r . status_code ) : result [ SUCCESS ] = True else : result [ MESSAGE ] = "Got http statuscode: %d." % ( r . status_code ) return result except requests . RequestException as ose : result [ MESSAGE ] = 'Error getting url data. %s' % ose log . error ( result [ MESSAGE ] ) return result
|
Load json data from url and return result .
|
2,738
|
def __parse_ws_data ( jsondata , latitude = 52.091579 , longitude = 5.119734 ) : log . info ( "Parse ws data: latitude: %s, longitude: %s" , latitude , longitude ) result = { SUCCESS : False , MESSAGE : None , DATA : None } loc_data = __select_nearest_ws ( jsondata , latitude , longitude ) if not loc_data : result [ MESSAGE ] = 'No location selected.' return result if not __is_valid ( loc_data ) : result [ MESSAGE ] = 'Location data is invalid.' return result log . debug ( "Raw location data: %s" , loc_data ) result [ DISTANCE ] = __get_ws_distance ( loc_data , latitude , longitude ) result = __parse_loc_data ( loc_data , result ) try : fc_data = jsondata [ __FORECAST ] [ __FIVEDAYFORECAST ] except ( json . JSONDecodeError , KeyError ) : result [ MESSAGE ] = 'Unable to extract forecast data.' log . exception ( result [ MESSAGE ] ) return result if fc_data : log . debug ( "Raw forecast data: %s" , fc_data ) result [ DATA ] [ FORECAST ] = __parse_fc_data ( fc_data ) return result
|
Parse the buienradar json and rain data .
|
2,739
|
def __parse_loc_data ( loc_data , result ) : result [ DATA ] = { ATTRIBUTION : ATTRIBUTION_INFO , FORECAST : [ ] , PRECIPITATION_FORECAST : None } for key , [ value , func ] in SENSOR_TYPES . items ( ) : result [ DATA ] [ key ] = None try : sens_data = loc_data [ value ] if key == CONDITION : desc = loc_data [ __WEATHERDESCRIPTION ] result [ DATA ] [ CONDITION ] = __cond_from_desc ( desc ) result [ DATA ] [ CONDITION ] [ IMAGE ] = loc_data [ __ICONURL ] continue if key == STATIONNAME : result [ DATA ] [ key ] = __getStationName ( loc_data [ __STATIONNAME ] , loc_data [ __STATIONID ] ) continue if func is not None : result [ DATA ] [ key ] = func ( sens_data ) else : result [ DATA ] [ key ] = sens_data except KeyError : if result [ MESSAGE ] is None : result [ MESSAGE ] = "Missing key(s) in br data: " result [ MESSAGE ] += "%s " % value log . warning ( "Data element with key='%s' " "not loaded from br data!" , key ) result [ SUCCESS ] = True return result
|
Parse the json data from selected weatherstation .
|
2,740
|
def __parse_fc_data ( fc_data ) : fc = [ ] for day in fc_data : fcdata = { CONDITION : __cond_from_desc ( __get_str ( day , __WEATHERDESCRIPTION ) ) , TEMPERATURE : __get_float ( day , __MAXTEMPERATURE ) , MIN_TEMP : __get_float ( day , __MINTEMPERATURE ) , MAX_TEMP : __get_float ( day , __MAXTEMPERATURE ) , SUN_CHANCE : __get_int ( day , __SUNCHANCE ) , RAIN_CHANCE : __get_int ( day , __RAINCHANCE ) , RAIN : __get_float ( day , __MMRAINMAX ) , MIN_RAIN : __get_float ( day , __MMRAINMIN ) , MAX_RAIN : __get_float ( day , __MMRAINMAX ) , SNOW : 0 , WINDFORCE : __get_int ( day , __WIND ) , WINDDIRECTION : __get_str ( day , __WINDDIRECTION ) , DATETIME : __to_localdatetime ( __get_str ( day , __DAY ) ) , } fcdata [ CONDITION ] [ IMAGE ] = day [ __ICONURL ] fc . append ( fcdata ) return fc
|
Parse the forecast data from the json section .
|
2,741
|
def __get_float ( section , name ) : try : return float ( section [ name ] ) except ( ValueError , TypeError , KeyError ) : return float ( 0 )
|
Get the forecasted float from json section .
|
2,742
|
def __parse_precipfc_data ( data , timeframe ) : result = { AVERAGE : None , TOTAL : None , TIMEFRAME : None } log . debug ( "Precipitation data: %s" , data ) lines = data . splitlines ( ) index = 1 totalrain = 0 numberoflines = 0 nrlines = min ( len ( lines ) , round ( float ( timeframe ) / 5 ) + 1 ) while index < nrlines : line = lines [ index ] log . debug ( "__parse_precipfc_data: line: %s" , line ) ( val , key ) = line . split ( "|" ) mmu = 10 ** ( float ( ( int ( val ) - 109 ) ) / 32 ) totalrain = totalrain + float ( mmu ) numberoflines = numberoflines + 1 index += 1 if numberoflines > 0 : result [ AVERAGE ] = round ( ( totalrain / numberoflines ) , 2 ) else : result [ AVERAGE ] = 0 result [ TOTAL ] = round ( totalrain / 12 , 2 ) result [ TIMEFRAME ] = timeframe return result
|
Parse the forecasted precipitation data .
|
2,743
|
def __cond_from_desc ( desc ) : for code , [ condition , detailed , exact , exact_nl ] in __BRCONDITIONS . items ( ) : if exact_nl == desc : return { CONDCODE : code , CONDITION : condition , DETAILED : detailed , EXACT : exact , EXACTNL : exact_nl } return None
|
Get the condition name from the condition description .
|
2,744
|
def __get_ws_distance ( wstation , latitude , longitude ) : if wstation : try : wslat = float ( wstation [ __LAT ] ) wslon = float ( wstation [ __LON ] ) dist = vincenty ( ( latitude , longitude ) , ( wslat , wslon ) ) log . debug ( "calc distance: %s (latitude: %s, longitude: " "%s, wslat: %s, wslon: %s)" , dist , latitude , longitude , wslat , wslon ) return dist except ( ValueError , TypeError , KeyError ) : return None else : return None
|
Get the distance to the weatherstation from wstation section of json .
|
2,745
|
def __getStationName ( name , id ) : name = name . replace ( "Meetstation" , "" ) name = name . strip ( ) name += " (%s)" % id return name
|
Construct a station name.
|
2,746
|
def as_view ( cls , * args , ** kwargs ) : initkwargs = cls . get_initkwargs ( * args , ** kwargs ) return super ( WizardView , cls ) . as_view ( ** initkwargs )
|
This method is used within urls.py to create unique formwizard instances for every request. We need to override this method because we add some kwargs which are needed to make the formwizard usable.
|
2,747
|
def get_initkwargs ( cls , form_list , initial_dict = None , instance_dict = None , condition_dict = None , * args , ** kwargs ) : kwargs . update ( { 'initial_dict' : initial_dict or { } , 'instance_dict' : instance_dict or { } , 'condition_dict' : condition_dict or { } , } ) init_form_list = SortedDict ( ) assert len ( form_list ) > 0 , 'at least one form is needed' for i , form in enumerate ( form_list ) : if isinstance ( form , ( list , tuple ) ) : init_form_list [ unicode ( form [ 0 ] ) ] = form [ 1 ] else : init_form_list [ unicode ( i ) ] = form for form in init_form_list . itervalues ( ) : if issubclass ( form , formsets . BaseFormSet ) : form = form . form for field in form . base_fields . itervalues ( ) : if ( isinstance ( field , forms . FileField ) and not hasattr ( cls , 'file_storage' ) ) : raise NoFileStorageConfigured kwargs [ 'form_list' ] = init_form_list return kwargs
|
Creates a dict with all needed parameters for the form wizard instances .
|
2,748
|
def dispatch ( self , request , * args , ** kwargs ) : self . wizard_name = self . get_wizard_name ( ) self . prefix = self . get_prefix ( ) self . storage = get_storage ( self . storage_name , self . prefix , request , getattr ( self , 'file_storage' , None ) ) self . steps = StepsHelper ( self ) response = super ( WizardView , self ) . dispatch ( request , * args , ** kwargs ) self . storage . update_response ( response ) return response
|
This method gets called by the routing engine. The first argument is request, which contains an HttpRequest instance. The request is stored in self.request for later use. The storage instance is stored in self.storage.
|
2,749
|
def get ( self , request , * args , ** kwargs ) : self . storage . reset ( ) self . storage . current_step = self . steps . first return self . render ( self . get_form ( ) )
|
This method handles GET requests .
|
2,750
|
def post ( self , * args , ** kwargs ) : wizard_prev_step = self . request . POST . get ( 'wizard_prev_step' , None ) if wizard_prev_step and wizard_prev_step in self . get_form_list ( ) : self . storage . current_step = wizard_prev_step form = self . get_form ( data = self . storage . get_step_data ( self . steps . current ) , files = self . storage . get_step_files ( self . steps . current ) ) return self . render ( form ) management_form = ManagementForm ( self . request . POST , prefix = self . prefix ) if not management_form . is_valid ( ) : raise ValidationError ( 'ManagementForm data is missing or has been tampered.' ) form_current_step = management_form . cleaned_data [ 'current_step' ] if ( form_current_step != self . steps . current and self . storage . current_step is not None ) : self . storage . current_step = form_current_step form = self . get_form ( data = self . request . POST , files = self . request . FILES ) if form . is_valid ( ) : self . storage . set_step_data ( self . steps . current , self . process_step ( form ) ) self . storage . set_step_files ( self . steps . current , self . process_step_files ( form ) ) if self . steps . current == self . steps . last : return self . render_done ( form , ** kwargs ) else : return self . render_next_step ( form ) return self . render ( form )
|
This method handles POST requests .
|
2,751
|
def render_done ( self , form , ** kwargs ) : final_form_list = [ ] for form_key in self . get_form_list ( ) : form_obj = self . get_form ( step = form_key , data = self . storage . get_step_data ( form_key ) , files = self . storage . get_step_files ( form_key ) ) if not form_obj . is_valid ( ) : return self . render_revalidation_failure ( form_key , form_obj , ** kwargs ) final_form_list . append ( form_obj ) done_response = self . done ( final_form_list , ** kwargs ) self . storage . reset ( ) return done_response
|
This method gets called when all forms have passed. The method should also re-validate all steps to prevent manipulation. If any form doesn't validate, render_revalidation_failure should get called. If everything is fine, call done.
|
2,752
|
def get_form_prefix ( self , step = None , form = None ) : if step is None : step = self . steps . current return str ( step )
|
Returns the prefix which will be used when calling the actual form for the given step. step contains the step name for the form which will be called with the returned prefix.
|
2,753
|
def get_form ( self , step = None , data = None , files = None ) : if step is None : step = self . steps . current kwargs = self . get_form_kwargs ( step ) kwargs . update ( { 'data' : data , 'files' : files , 'prefix' : self . get_form_prefix ( step , self . form_list [ step ] ) , 'initial' : self . get_form_initial ( step ) , } ) if issubclass ( self . form_list [ step ] , forms . ModelForm ) : kwargs . update ( { 'instance' : self . get_form_instance ( step ) } ) elif issubclass ( self . form_list [ step ] , forms . models . BaseModelFormSet ) : kwargs . update ( { 'queryset' : self . get_form_instance ( step ) } ) return self . form_list [ step ] ( ** kwargs )
|
Constructs the form for a given step . If no step is defined the current step will be determined automatically .
|
2,754
|
def render_revalidation_failure ( self , step , form , ** kwargs ) : self . storage . current_step = step return self . render ( form , ** kwargs )
|
Gets called when a form doesn't validate while rendering the done view. By default it changes the current step to the failing form's step and renders the form.
|
2,755
|
def get_all_cleaned_data ( self ) : cleaned_data = { } for form_key in self . get_form_list ( ) : form_obj = self . get_form ( step = form_key , data = self . storage . get_step_data ( form_key ) , files = self . storage . get_step_files ( form_key ) ) if form_obj . is_valid ( ) : if isinstance ( form_obj . cleaned_data , ( tuple , list ) ) : cleaned_data . update ( { 'formset-%s' % form_key : form_obj . cleaned_data } ) else : cleaned_data . update ( form_obj . cleaned_data ) return cleaned_data
|
Returns a merged dictionary of all step cleaned_data dictionaries . If a step contains a FormSet the key will be prefixed with formset and contain a list of the formset cleaned_data dictionaries .
|
2,756
|
def get_cleaned_data_for_step ( self , step ) : if step in self . form_list : form_obj = self . get_form ( step = step , data = self . storage . get_step_data ( step ) , files = self . storage . get_step_files ( step ) ) if form_obj . is_valid ( ) : return form_obj . cleaned_data return None
|
Returns the cleaned data for a given step. Before returning the cleaned data, the stored values are revalidated through the form. If the data doesn't validate, None will be returned.
|
2,757
|
def get_step_index ( self , step = None ) : if step is None : step = self . steps . current return self . get_form_list ( ) . keyOrder . index ( step )
|
Returns the index for the given step name . If no step is given the current step will be used to get the index .
|
2,758
|
def render ( self , form = None , ** kwargs ) : form = form or self . get_form ( ) context = self . get_context_data ( form , ** kwargs ) return self . render_to_response ( context )
|
Returns an HttpResponse containing all needed context data.
|
2,759
|
def get_initkwargs ( cls , * args , ** kwargs ) : assert 'url_name' in kwargs , 'URL name is needed to resolve correct wizard URLs' extra_kwargs = { 'done_step_name' : kwargs . pop ( 'done_step_name' , 'done' ) , 'url_name' : kwargs . pop ( 'url_name' ) , } initkwargs = super ( NamedUrlWizardView , cls ) . get_initkwargs ( * args , ** kwargs ) initkwargs . update ( extra_kwargs ) assert initkwargs [ 'done_step_name' ] not in initkwargs [ 'form_list' ] , 'step name "%s" is reserved for "done" view' % initkwargs [ 'done_step_name' ] return initkwargs
|
We require a url_name to reverse URLs later . Additionally users can pass a done_step_name to change the URL name of the done view .
|
2,760
|
def get ( self , * args , ** kwargs ) : step_url = kwargs . get ( 'step' , None ) if step_url is None : if 'reset' in self . request . GET : self . storage . reset ( ) self . storage . current_step = self . steps . first if self . request . GET : query_string = "?%s" % self . request . GET . urlencode ( ) else : query_string = "" next_step_url = reverse ( self . url_name , kwargs = { 'step' : self . steps . current , } ) + query_string return redirect ( next_step_url ) elif step_url == self . done_step_name : last_step = self . steps . last return self . render_done ( self . get_form ( step = last_step , data = self . storage . get_step_data ( last_step ) , files = self . storage . get_step_files ( last_step ) ) , ** kwargs ) elif step_url == self . steps . current : return self . render ( self . get_form ( data = self . storage . current_step_data , files = self . storage . current_step_data , ) , ** kwargs ) elif step_url in self . get_form_list ( ) : self . storage . current_step = step_url return self . render ( self . get_form ( data = self . storage . current_step_data , files = self . storage . current_step_data , ) , ** kwargs ) else : self . storage . current_step = self . steps . first return redirect ( self . url_name , step = self . steps . first )
|
This renders the form or, if needed, does the HTTP redirects.
|
2,761
|
def post ( self , * args , ** kwargs ) : prev_step = self . request . POST . get ( 'wizard_prev_step' , None ) if prev_step and prev_step in self . get_form_list ( ) : self . storage . current_step = prev_step return redirect ( self . url_name , step = prev_step ) return super ( NamedUrlWizardView , self ) . post ( * args , ** kwargs )
|
Do a redirect if the user presses the prev. step button. The rest of this is super'd from FormWizard.
|
2,762
|
def render_next_step ( self , form , ** kwargs ) : next_step = self . get_next_step ( ) self . storage . current_step = next_step return redirect ( self . url_name , step = next_step )
|
When using the NamedUrlFormWizard we have to redirect to update the browser's URL to match the shown step.
|
2,763
|
def render_revalidation_failure ( self , failed_step , form , ** kwargs ) : self . storage . current_step = failed_step return redirect ( self . url_name , step = failed_step )
|
When a step fails we have to redirect the user to the first failing step .
|
2,764
|
def get_store ( logger : Logger = None ) -> 'Store' : from trading_bots . conf import settings store_settings = settings . storage store = store_settings . get ( 'name' , 'json' ) if store == 'json' : store = 'trading_bots.core.storage.JSONStore' elif store == 'redis' : store = 'trading_bots.core.storage.RedisStore' store_cls = load_class_by_name ( store ) kwargs = store_cls . configure ( store_settings ) return store_cls ( logger = logger , ** kwargs )
|
Get and configure the storage backend
|
2,765
|
def parse_request_headers ( headers ) : request_header_keys = set ( headers . keys ( lower = True ) ) request_meta_keys = set ( XHEADERS_TO_ARGS_DICT . keys ( ) ) data_header_keys = request_header_keys . intersection ( request_meta_keys ) return dict ( ( [ XHEADERS_TO_ARGS_DICT [ key ] , headers . get ( key , None ) ] for key in data_header_keys ) )
|
Convert headers into a human-readable format.
|
2,766
|
def split_docstring ( docstring ) : docstring_list = [ line . strip ( ) for line in docstring . splitlines ( ) ] description_list = list ( takewhile ( lambda line : not ( line . startswith ( ':' ) or line . startswith ( '@inherit' ) ) , docstring_list ) ) description = ' ' . join ( description_list ) . strip ( ) first_field_line_number = len ( description_list ) fields = [ ] if first_field_line_number >= len ( docstring_list ) : return description , fields last_field_lines = [ docstring_list [ first_field_line_number ] ] for line in docstring_list [ first_field_line_number + 1 : ] : if line . strip ( ) . startswith ( ':' ) or line . strip ( ) . startswith ( '@inherit' ) : fields . append ( ' ' . join ( last_field_lines ) ) last_field_lines = [ line ] else : last_field_lines . append ( line ) fields . append ( ' ' . join ( last_field_lines ) ) return description , fields
|
Separates the method's description and parameters.
|
2,767
|
def get_method_docstring ( cls , method_name ) : method = getattr ( cls , method_name , None ) if method is None : return docstrign = inspect . getdoc ( method ) if docstrign is None : for base in cls . __bases__ : docstrign = get_method_docstring ( base , method_name ) if docstrign : return docstrign else : return None return docstrign
|
Return the method docstring; if the method docstring is empty, get the docstring from the parent.
|
2,768
|
def condition_from_code ( condcode ) : if condcode in __BRCONDITIONS : cond_data = __BRCONDITIONS [ condcode ] return { CONDCODE : condcode , CONDITION : cond_data [ 0 ] , DETAILED : cond_data [ 1 ] , EXACT : cond_data [ 2 ] , EXACTNL : cond_data [ 3 ] , } return None
|
Get the condition name from the condition code .
|
2,769
|
def validate ( self , ** kwargs ) : try : submission_file_schema = json . load ( open ( self . default_schema_file , 'r' ) ) additional_file_section_schema = json . load ( open ( self . additional_info_schema , 'r' ) ) data = kwargs . pop ( "data" , None ) file_path = kwargs . pop ( "file_path" , None ) if file_path is None : raise LookupError ( "file_path argument must be supplied" ) if data is None : data = yaml . load_all ( open ( file_path , 'r' ) , Loader = Loader ) for data_item_index , data_item in enumerate ( data ) : if data_item is None : continue try : if not data_item_index and 'data_file' not in data_item : validate ( data_item , additional_file_section_schema ) else : validate ( data_item , submission_file_schema ) except ValidationError as ve : self . add_validation_message ( ValidationMessage ( file = file_path , message = ve . message + ' in ' + str ( ve . instance ) ) ) if self . has_errors ( file_path ) : return False else : return True except ScannerError as se : self . add_validation_message ( ValidationMessage ( file = file_path , message = 'There was a problem parsing the file. ' 'This can be because you forgot spaces ' 'after colons in your YAML file for instance. ' 'Diagnostic information follows.\n' + str ( se ) ) ) return False except Exception as e : self . add_validation_message ( ValidationMessage ( file = file_path , message = e . __str__ ( ) ) ) return False
|
Validates a submission file
|
2,770
|
def load_class_by_name ( name : str ) : mod_path , _ , cls_name = name . rpartition ( '.' ) mod = importlib . import_module ( mod_path ) cls = getattr ( mod , cls_name ) return cls
|
Given a dotted path returns the class
|
2,771
|
def load_yaml_file ( file_path : str ) : with codecs . open ( file_path , 'r' ) as f : return yaml . safe_load ( f )
|
Load a YAML file from path
|
2,772
|
def run_itx_resistance_assessment ( job , rsem_files , univ_options , reports_options ) : return job . addChildJobFn ( assess_itx_resistance , rsem_files [ 'rsem.genes.results' ] , univ_options , reports_options ) . rv ( )
|
A wrapper for assess_itx_resistance .
|
2,773
|
def CELERY_RESULT_BACKEND ( self ) : configured = get ( 'CELERY_RESULT_BACKEND' , None ) if configured : return configured if not self . _redis_available ( ) : return None host , port = self . REDIS_HOST , self . REDIS_PORT if host and port : default = "redis://{host}:{port}/{db}" . format ( host = host , port = port , db = self . CELERY_REDIS_RESULT_DB ) return default
|
Redis result backend config
|
2,774
|
def BROKER_TYPE ( self ) : broker_type = get ( 'BROKER_TYPE' , DEFAULT_BROKER_TYPE ) if broker_type not in SUPPORTED_BROKER_TYPES : log . warn ( "Specified BROKER_TYPE {} not supported. Backing to default {}" . format ( broker_type , DEFAULT_BROKER_TYPE ) ) return DEFAULT_BROKER_TYPE else : return broker_type
|
Custom setting allowing a switch between rabbitmq and redis.
|
2,775
|
def BROKER_URL ( self ) : broker_url = get ( 'BROKER_URL' , None ) if broker_url : log . info ( "Using BROKER_URL setting: {}" . format ( broker_url ) ) return broker_url redis_available = self . _redis_available ( ) broker_type = self . BROKER_TYPE if broker_type == 'redis' and not redis_available : log . warn ( "Choosed broker type is redis, but redis not available. \ Check redis package, and REDIS_HOST, REDIS_PORT settings" ) if broker_type == 'redis' and redis_available : return 'redis://{host}:{port}/{db}' . format ( host = self . REDIS_HOST , port = self . REDIS_PORT , db = self . CELERY_REDIS_BROKER_DB ) elif broker_type == 'rabbitmq' : return 'amqp://{user}:{passwd}@{host}:{port}/{vhost}' . format ( user = self . RABBITMQ_USER , passwd = self . RABBITMQ_PASSWD , host = self . RABBITMQ_HOST , port = self . RABBITMQ_PORT , vhost = self . RABBITMQ_VHOST ) else : return DEFAULT_BROKER_URL
|
Sets BROKER_URL depending on redis or rabbitmq settings
|
2,776
|
def traverse_inventory ( self , item_filter = None ) : not self . _intentory_raw and self . _get_inventory_raw ( ) for item in self . _intentory_raw [ 'rgDescriptions' ] . values ( ) : tags = item [ 'tags' ] for tag in tags : internal_name = tag [ 'internal_name' ] if item_filter is None or internal_name == item_filter : item_type = Item if internal_name == TAG_ITEM_CLASS_CARD : item_type = Card appid = item [ 'market_fee_app' ] title = item [ 'name' ] yield item_type ( appid , title )
|
Generates market Item objects for each inventory item .
|
2,777
|
def validate ( self , ** kwargs ) : default_data_schema = json . load ( open ( self . default_schema_file , 'r' ) ) data = kwargs . pop ( "data" , None ) file_path = kwargs . pop ( "file_path" , None ) if file_path is None : raise LookupError ( "file_path argument must be supplied" ) if data is None : try : data = yaml . load ( open ( file_path , 'r' ) , Loader = Loader ) except Exception as e : self . add_validation_message ( ValidationMessage ( file = file_path , message = 'There was a problem parsing the file.\n' + e . __str__ ( ) ) ) return False try : if 'type' in data : custom_schema = self . load_custom_schema ( data [ 'type' ] ) json_validate ( data , custom_schema ) else : json_validate ( data , default_data_schema ) except ValidationError as ve : self . add_validation_message ( ValidationMessage ( file = file_path , message = ve . message + ' in ' + str ( ve . instance ) ) ) if self . has_errors ( file_path ) : return False else : return True
|
Validates a data file
|
2,778
|
def b ( s ) : if sys . version < '3' : if isinstance ( s , unicode ) : return s . encode ( 'utf-8' ) else : return s
|
Conversion to bytes
|
2,779
|
def validatefeatures ( self , features ) : validatedfeatures = [ ] for feature in features : if isinstance ( feature , int ) or isinstance ( feature , float ) : validatedfeatures . append ( str ( feature ) ) elif self . delimiter in feature and not self . sklearn : raise ValueError ( "Feature contains delimiter: " + feature ) elif self . sklearn and isinstance ( feature , str ) : validatedfeatures . append ( feature ) else : validatedfeatures . append ( feature ) return validatedfeatures
|
Returns features in validated form or raises an Exception . Mostly for internal use
|
2,780
|
def addinstance ( self , testfile , features , classlabel = "?" ) : features = self . validatefeatures ( features ) if self . delimiter in classlabel : raise ValueError ( "Class label contains delimiter: " + self . delimiter ) f = io . open ( testfile , 'a' , encoding = self . encoding ) f . write ( self . delimiter . join ( features ) + self . delimiter + classlabel + "\n" ) f . close ( )
|
Adds an instance to a specific file . Especially suitable for generating test files
|
2,781
|
def crossvalidate ( self , foldsfile ) : options = "-F " + self . format + " " + self . timbloptions + " -t cross_validate" print ( "Instantiating Timbl API : " + options , file = stderr ) if sys . version < '3' : self . api = timblapi . TimblAPI ( b ( options ) , b"" ) else : self . api = timblapi . TimblAPI ( options , "" ) if self . debug : print ( "Enabling debug for timblapi" , file = stderr ) self . api . enableDebug ( ) print ( "Calling Timbl Test : " + options , file = stderr ) if sys . version < '3' : self . api . test ( b ( foldsfile ) , b'' , b'' ) else : self . api . test ( u ( foldsfile ) , '' , '' ) a = self . api . getAccuracy ( ) del self . api return a
|
Train & test using cross-validation. foldsfile is a file that contains the filenames of all the folds!
|
2,782
|
def leaveoneout ( self ) : traintestfile = self . fileprefix + '.train' options = "-F " + self . format + " " + self . timbloptions + " -t leave_one_out" if sys . version < '3' : self . api = timblapi . TimblAPI ( b ( options ) , b"" ) else : self . api = timblapi . TimblAPI ( options , "" ) if self . debug : print ( "Enabling debug for timblapi" , file = stderr ) self . api . enableDebug ( ) print ( "Calling Timbl API : " + options , file = stderr ) if sys . version < '3' : self . api . learn ( b ( traintestfile ) ) self . api . test ( b ( traintestfile ) , b ( self . fileprefix + '.out' ) , b'' ) else : self . api . learn ( u ( traintestfile ) ) self . api . test ( u ( traintestfile ) , u ( self . fileprefix + '.out' ) , '' ) return self . api . getAccuracy ( )
|
Train & Test using leave one out
|
2,783
|
def set_action_cache ( self , action_key , data ) : if self . cache : self . cache . set ( self . app . config [ 'ACCESS_ACTION_CACHE_PREFIX' ] + action_key , data )
|
Store action needs and excludes .
|
2,784
|
def get_action_cache ( self , action_key ) : data = None if self . cache : data = self . cache . get ( self . app . config [ 'ACCESS_ACTION_CACHE_PREFIX' ] + action_key ) return data
|
Get action needs and excludes from cache .
|
2,785
|
def delete_action_cache ( self , action_key ) : if self . cache : self . cache . delete ( self . app . config [ 'ACCESS_ACTION_CACHE_PREFIX' ] + action_key )
|
Delete action needs and excludes from cache .
|
2,786
|
def register_action ( self , action ) : assert action . value not in self . actions self . actions [ action . value ] = action
|
Register an action to be showed in the actions list .
|
2,787
|
def register_system_role ( self , system_role ) : assert system_role . value not in self . system_roles self . system_roles [ system_role . value ] = system_role
|
Register a system role .
|
2,788
|
def load_entry_point_system_roles ( self , entry_point_group ) : for ep in pkg_resources . iter_entry_points ( group = entry_point_group ) : self . register_system_role ( ep . load ( ) )
|
Load system roles from an entry point group .
|
2,789
|
def main ( argv = sys . argv [ 1 : ] ) : args = docopt ( __doc__ , argv = argv , version = pkg_resources . require ( 'buienradar' ) [ 0 ] . version ) level = logging . ERROR if args [ '-v' ] : level = logging . INFO if args [ '-v' ] == 2 : level = logging . DEBUG logging . basicConfig ( level = level ) log = logging . getLogger ( __name__ ) log . info ( "Start..." ) latitude = float ( args [ '--latitude' ] ) longitude = float ( args [ '--longitude' ] ) timeframe = int ( args [ '--timeframe' ] ) usexml = False if args [ '--usexml' ] : usexml = True result = get_data ( latitude , longitude , usexml ) if result [ SUCCESS ] : log . debug ( "Retrieved data:\n%s" , result ) result = parse_data ( result [ CONTENT ] , result [ RAINCONTENT ] , latitude , longitude , timeframe , usexml ) log . info ( "result: %s" , result ) print ( result ) else : log . error ( "Retrieving weather data was not successfull (%s)" , result [ MESSAGE ] )
|
Parse arguments and start the main program.
|
2,790
|
def global_unlock_percent ( self ) : percent = CRef . cfloat ( ) result = self . _iface . get_ach_progress ( self . name , percent ) if not result : return 0.0 return float ( percent )
|
Global achievement unlock percent .
|
2,791
|
def unlocked ( self ) : achieved = CRef . cbool ( ) result = self . _iface . get_ach ( self . name , achieved ) if not result : return False return bool ( achieved )
|
True if achievement is unlocked .
|
2,792
|
def unlock ( self , store = True ) : result = self . _iface . ach_unlock ( self . name ) result and store and self . _store ( ) return result
|
Unlocks the achievement .
|
2,793
|
def __parse_ws_data ( content , latitude = 52.091579 , longitude = 5.119734 ) : log . info ( "Parse ws data: latitude: %s, longitude: %s" , latitude , longitude ) result = { SUCCESS : False , MESSAGE : None , DATA : None } try : xmldata = xmltodict . parse ( content ) [ __BRROOT ] except ( xmltodict . expat . ExpatError , KeyError ) : result [ MESSAGE ] = "Unable to parse content as xml." log . exception ( result [ MESSAGE ] ) return result loc_data = __select_nearest_ws ( xmldata , latitude , longitude ) if not loc_data : result [ MESSAGE ] = 'No location selected.' return result if not __is_valid ( loc_data ) : result [ MESSAGE ] = 'Location data is invalid.' return result log . debug ( "Raw location data: %s" , loc_data ) result [ DISTANCE ] = __get_ws_distance ( loc_data , latitude , longitude ) result = __parse_loc_data ( loc_data , result ) try : fc_data = xmldata [ __BRWEERGEGEVENS ] [ __BRVERWACHTING ] except ( xmltodict . expat . ExpatError , KeyError ) : result [ MESSAGE ] = 'Unable to extract forecast data.' log . exception ( result [ MESSAGE ] ) return result if fc_data : log . debug ( "Raw forecast data: %s" , fc_data ) result [ DATA ] [ FORECAST ] = __parse_fc_data ( fc_data ) return result
|
Parse the buienradar xml and rain data .
|
2,794
|
def __parse_loc_data ( loc_data , result ) : result [ DATA ] = { ATTRIBUTION : ATTRIBUTION_INFO , FORECAST : [ ] , PRECIPITATION_FORECAST : None } for key , [ value , func ] in SENSOR_TYPES . items ( ) : result [ DATA ] [ key ] = None try : from buienradar . buienradar import condition_from_code sens_data = loc_data [ value ] if key == CONDITION : code = sens_data [ __BRID ] [ : 1 ] result [ DATA ] [ CONDITION ] = condition_from_code ( code ) result [ DATA ] [ CONDITION ] [ IMAGE ] = sens_data [ __BRTEXT ] else : if key == STATIONNAME : name = sens_data [ __BRTEXT ] . replace ( "Meetstation" , "" ) name = name . strip ( ) name += " (%s)" % loc_data [ __BRSTATIONCODE ] result [ DATA ] [ key ] = name else : if func is not None : result [ DATA ] [ key ] = func ( sens_data ) else : result [ DATA ] [ key ] = sens_data except KeyError : if result [ MESSAGE ] is None : result [ MESSAGE ] = "Missing key(s) in br data: " result [ MESSAGE ] += "%s " % value log . warning ( "Data element with key='%s' " "not loaded from br data!" , key ) result [ SUCCESS ] = True return result
|
Parse the xml data from selected weatherstation .
|
2,795
|
def __parse_fc_data ( fc_data ) : from buienradar . buienradar import condition_from_code fc = [ ] for daycnt in range ( 1 , 6 ) : daysection = __BRDAYFC % daycnt if daysection in fc_data : tmpsect = fc_data [ daysection ] fcdatetime = datetime . now ( pytz . timezone ( __TIMEZONE ) ) fcdatetime = fcdatetime . replace ( hour = 12 , minute = 0 , second = 0 , microsecond = 0 ) fcdatetime = fcdatetime + timedelta ( days = daycnt ) code = tmpsect . get ( __BRICOON , [ ] ) . get ( __BRID ) fcdata = { CONDITION : condition_from_code ( code ) , TEMPERATURE : __get_float ( tmpsect , __BRMAXTEMP ) , MIN_TEMP : __get_float ( tmpsect , __BRMINTEMP ) , MAX_TEMP : __get_float ( tmpsect , __BRMAXTEMP ) , SUN_CHANCE : __get_int ( tmpsect , __BRKANSZON ) , RAIN_CHANCE : __get_int ( tmpsect , __BRKANSREGEN ) , RAIN : __get_float ( tmpsect , __BRMAXMMREGEN ) , SNOW : __get_float ( tmpsect , __BRSNEEUWCMS ) , WINDFORCE : __get_int ( tmpsect , __BRWINDKRACHT ) , DATETIME : fcdatetime , } fcdata [ CONDITION ] [ IMAGE ] = tmpsect . get ( __BRICOON , [ ] ) . get ( __BRTEXT ) fc . append ( fcdata ) return fc
|
Parse the forecast data from the xml section .
|
2,796
|
def __get_ws_distance ( wstation , latitude , longitude ) : if wstation : try : wslat = float ( wstation [ __BRLAT ] ) wslon = float ( wstation [ __BRLON ] ) dist = vincenty ( ( latitude , longitude ) , ( wslat , wslon ) ) log . debug ( "calc distance: %s (latitude: %s, longitude: " "%s, wslat: %s, wslon: %s)" , dist , latitude , longitude , wslat , wslon ) return dist except ( ValueError , TypeError , KeyError ) : return None else : return None
|
Get the distance to the weatherstation from wstation section of xml .
|
2,797
|
def predict_mhcii_binding ( job , peptfile , allele , univ_options , mhcii_options ) : work_dir = os . getcwd ( ) input_files = { 'peptfile.faa' : peptfile } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) peptides = read_peptide_file ( os . path . join ( os . getcwd ( ) , 'peptfile.faa' ) ) parameters = [ mhcii_options [ 'pred' ] , allele , input_files [ 'peptfile.faa' ] ] if not peptides : return job . fileStore . writeGlobalFile ( job . fileStore . getLocalTempFile ( ) ) , None with open ( '/' . join ( [ work_dir , 'predictions.tsv' ] ) , 'w' ) as predfile : docker_call ( tool = 'mhcii' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = predfile , interactive = True , tool_version = mhcii_options [ 'version' ] ) run_netmhciipan = True predictor = None with open ( predfile . name , 'r' ) as predfile : for line in predfile : if not line . startswith ( 'HLA' ) : continue if line . strip ( ) . split ( '\t' ) [ 5 ] == 'NetMHCIIpan' : break elif line . strip ( ) . split ( '\t' ) [ 5 ] == 'Sturniolo' : predictor = 'Sturniolo' else : predictor = 'Consensus' run_netmhciipan = False break if run_netmhciipan : netmhciipan = job . addChildJobFn ( predict_netmhcii_binding , peptfile , allele , univ_options , mhcii_options [ 'netmhciipan' ] , disk = '100M' , memory = '100M' , cores = 1 ) job . fileStore . logToMaster ( 'Ran mhcii on %s:%s successfully' % ( univ_options [ 'patient' ] , allele ) ) return netmhciipan . rv ( ) else : output_file = job . fileStore . writeGlobalFile ( predfile . name ) job . fileStore . logToMaster ( 'Ran mhcii on %s:%s successfully' % ( univ_options [ 'patient' ] , allele ) ) return output_file , predictor
|
Predict binding for each peptide in peptfile to allele using the IEDB mhcii binding prediction tool .
|
2,798
|
def predict_netmhcii_binding ( job , peptfile , allele , univ_options , netmhciipan_options ) : work_dir = os . getcwd ( ) input_files = { 'peptfile.faa' : peptfile } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) peptides = read_peptide_file ( os . path . join ( os . getcwd ( ) , 'peptfile.faa' ) ) if not peptides : return job . fileStore . writeGlobalFile ( job . fileStore . getLocalTempFile ( ) ) , None if allele . startswith ( 'HLA-DQA' ) or allele . startswith ( 'HLA-DPA' ) : allele = re . sub ( r'[*:]' , '' , allele ) allele = re . sub ( r'/' , '-' , allele ) elif allele . startswith ( 'HLA-DRB' ) : allele = re . sub ( r':' , '' , allele ) allele = re . sub ( r'\*' , '_' , allele ) allele = allele . lstrip ( 'HLA-' ) else : raise RuntimeError ( 'Unknown allele seen' ) parameters = [ '-a' , allele , '-xls' , '1' , '-xlsfile' , 'predictions.tsv' , '-f' , input_files [ 'peptfile.faa' ] ] with open ( os . devnull , 'w' ) as output_catcher : docker_call ( tool = 'netmhciipan' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = output_catcher , tool_version = netmhciipan_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( '/' . join ( [ work_dir , 'predictions.tsv' ] ) ) job . fileStore . logToMaster ( 'Ran netmhciipan on %s successfully' % allele ) return output_file , 'netMHCIIpan'
|
Predict binding for each peptide in peptfile to allele using netMHCIIpan .
|
2,799
|
def update ( self , permission ) : self . needs . update ( permission . needs ) self . excludes . update ( permission . excludes )
|
In - place update of permissions .
|