idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
13,000
def dumps(self, obj):
    """Serialize *obj* to an Avro-format byte string and return it."""
    buf = BytesIO()
    try:
        # Delegate the actual encoding to dump(), then snapshot the buffer.
        self.dump(obj, buf)
        return buf.getvalue()
    finally:
        buf.close()
Serializes obj to an avro - format byte array and returns it .
13,001
def loads(self, data):
    """Deserialize the byte string *data* into an object and return it."""
    stream = BytesIO(data)
    try:
        return self.load(stream)
    finally:
        # Always release the in-memory buffer, even if load() raises.
        stream.close()
Deserializes the given byte array into an object and returns it .
13,002
def create(cls, parent, child, relation_type, index=None):
    """Create a PID relation for the given parent and child PIDs.

    Raises a plain Exception when the relation already exists (unique
    constraint violation inside the nested transaction).
    """
    try:
        with db.session.begin_nested():
            relation = cls(
                parent_id=parent.id,
                child_id=child.id,
                relation_type=relation_type,
                index=index,
            )
            db.session.add(relation)
    except IntegrityError:
        raise Exception("PID Relation already exists.")
    return relation
Create a PID relation for given parent and child .
13,003
def relation_exists(self, parent, child, relation_type):
    """Return True if the given parent/child relation already exists."""
    existing = PIDRelation.query.filter_by(
        child_pid_id=child.id,
        parent_pid_id=parent.id,
        relation_type=relation_type,
    )
    return existing.count() > 0
Determine if given relation already exists .
13,004
def df(unit='GB'):
    """A wrapper for the ``df`` shell command.

    Runs ``df -TP`` and returns a dict mapping filesystem name to a details
    dict (Type, Size, Used, Available, Capacity, Use%, Using, MountedOn),
    with sizes converted via ``df_conversions[unit]``.

    NOTE(review): assumes ``Popen`` yields text stdout (Python 2 behavior);
    on Python 3 pass ``universal_newlines=True`` — confirm target runtime.
    """
    details = {}
    headers = ['Filesystem', 'Type', 'Size', 'Used', 'Available', 'Capacity', 'MountedOn']
    n = len(headers)
    unit = df_conversions[unit]
    p = subprocess.Popen(args=['df', '-TP'], stdout=subprocess.PIPE)
    stdout, stderr = p.communicate()
    lines = stdout.split("\n")
    # Normalize header spellings across df variants so they match `headers`.
    lines[0] = (lines[0].replace("Mounted on", "MountedOn")
                        .replace("1K-blocks", "Size")
                        .replace("1024-blocks", "Size"))
    assert lines[0].split() == headers
    lines = [l.strip() for l in lines if l.strip()]
    for line in lines[1:]:
        tokens = line.split()
        if tokens[0] == 'none':
            continue
        assert len(tokens) == n
        d = {}
        for x in range(1, len(headers)):
            d[headers[x]] = tokens[x]
        d['Size'] = float(d['Size']) / unit
        assert d['Capacity'].endswith("%")
        d['Use%'] = d['Capacity']
        d['Used'] = float(d['Used']) / unit
        d['Available'] = float(d['Available']) / unit
        d['Using'] = 100 * (d['Used'] / d['Size'])
        # BUGFIX: the original contained a garbled/unreachable branch here
        # (`if ...: pass / d['Using'] += 5 / else:`). Reconstructed intent:
        # ext* filesystems reserve ~5% for root, so inflate usage; otherwise
        # fall back to a known list of ext3-backed remote mounts.
        if d['Type'].startswith('ext'):
            d['Using'] += 5
        else:
            ext3_filesystems = ['ganon:', 'kortemmelab:', 'albana:']
            for e3fs in ext3_filesystems:
                if tokens[0].find(e3fs) != -1:
                    d['Using'] += 5
                    break
        details[tokens[0]] = d
    return details
A wrapper for the df shell command .
13,005
def url_replace(context, field, value):
    """Return the current query string with *field* replaced by *value*.

    Keeps every other GET parameter intact (works on a copy of request.GET).
    """
    params = context['request'].GET.copy()
    params[field] = value
    return params.urlencode()
Replaces one GET parameter while keeping the rest of the query string intact.
13,006
def ellipsis_or_number(context, paginator, current_page):
    """Decide what to display for *current_page* in the pagination bar.

    Returns the page number when it is near the chosen page or the edges,
    '...' just outside that window, and None (implicitly) otherwise.
    """
    request_get = context['request'].GET
    chosen_page = int(request_get['page']) if 'page' in request_get else 1
    # Pages shown verbatim: a +/-2 window around the chosen page plus the
    # two first and two last pages.
    visible = (chosen_page - 2, chosen_page - 1, chosen_page,
               chosen_page + 1, chosen_page + 2,
               1, 2, paginator.num_pages - 1, paginator.num_pages)
    if current_page in visible:
        return current_page
    if current_page in (chosen_page - 3, chosen_page + 3):
        return '...'
Avoids displaying an overly long pagination bar.
13,007
def create_items(sender, instance, **kwargs):
    """Signal handler: create and attach an Item when the instance has none."""
    if instance.item_id is None and instance.item is None:
        item = Item()
        # Mirror the sender's `active` flag when the model defines one.
        if hasattr(instance, 'active'):
            item.active = getattr(instance, 'active')
        item.save()
        instance.item = item
When one of the defined objects is created initialize also its item .
13,008
def add_parent(sender, instance, **kwargs):
    """Signal handler: on creation, link the new item to its task and context items."""
    if not kwargs['created']:
        return
    child = instance.item_id
    for attr in ['task', 'context']:
        parent = getattr(instance, attr).item_id
        ItemRelation.objects.get_or_create(
            parent_id=parent,
            child_id=child,
            visible=True,
        )
When a task instance is created create also an item relation .
13,009
def change_parent(sender, instance, **kwargs):
    """Signal handler: when the task/context FK of *instance* changed, move
    the corresponding ItemRelation rows to the new parent items.

    The original duplicated the same logic for 'task' and 'context'; it is
    factored into one helper parameterized by field, model and visibility.
    """
    if instance.id is None:
        return

    def _reparent(field, model, visible):
        # Re-link the item relation for a single changed FK field.
        if not ({field, field + '_id'} & set(instance.changed_fields)):
            return
        diff = instance.diff
        old = diff[field][0] if field in diff else diff[field + '_id'][0]
        old_parent_id = (old.item_id if isinstance(old, model)
                         else model.objects.get(pk=old).item_id)
        child_id = instance.item_id
        ItemRelation.objects.filter(parent_id=old_parent_id,
                                    child_id=child_id).delete()
        ItemRelation.objects.create(
            parent_id=getattr(instance, field).item_id,
            child_id=child_id,
            visible=visible,
        )

    _reparent('task', Task, True)
    _reparent('context', Context, False)
When the given task instance has changed, look at its task and context and update the corresponding item relations.
13,010
def delete_parent ( sender , instance , ** kwargs ) : ItemRelation . objects . filter ( child_id = instance . item_id ) . delete ( )
When the given task instance is deleted delete also the corresponding item relations .
13,011
def align_to_other(self, other, mapping, self_root_pair, other_root_pair=None):
    """Align this object's atoms onto *other*.

    Root atoms are the atoms off of which all remaining unmapped atoms are
    positioned: atoms named in *mapping* are copied directly from *other*,
    and the rest are rotated/translated using the transform computed between
    the two root-atom coordinate sets.

    Fixes applied: Python 3 compatibility (``xrange`` -> ``range``, print
    statement -> function) and identity comparisons with ``None``.
    """
    if other_root_pair is None:
        other_root_pair = self_root_pair
    assert len(self_root_pair) == len(other_root_pair)
    unmoved_atom_names = []
    new_coords = [None for _ in range(len(self_root_pair))]
    for atom in self.names:
        if atom in self_root_pair:
            i = self_root_pair.index(atom)
            assert new_coords[i] is None
            new_coords[i] = self.get_coords_for_name(atom)
        if atom in mapping:
            other_atom = mapping[atom]
            self.set_coords_for_name(atom, other.get_coords_for_name(other_atom))
        else:
            unmoved_atom_names.append(atom)
    if None in new_coords:
        # Debug aid retained from the original before the hard assert below.
        print(new_coords)
    assert None not in new_coords
    ref_coords = [other.get_coords_for_name(x) for x in other_root_pair]
    U, new_centroid, ref_centroid = calc_rotation_translation_matrices(
        ref_coords, new_coords)
    for atom in unmoved_atom_names:
        original_coord = self.get_coords_for_name(atom)
        self.set_coords_for_name(
            atom,
            rotate_and_translate_coord(original_coord, U, new_centroid, ref_centroid))
    self.chain = other.chain
Root atoms are the atoms off of which all other unmapped atoms will be mapped.
13,012
def pumper(html_generator):
    """Pull HTML chunks from *html_generator*, feed them to a pull parser,
    and yield DOM (event, element) pairs as they become available."""
    source = html_generator()
    parser = etree.HTMLPullParser(events=('start', 'end'), remove_comments=True)
    while True:
        for element in parser.read_events():
            yield element
        try:
            chunk = next(source)
        except StopIteration:
            # Source exhausted: close the document and drain the last events.
            parser.feed('</html>')
            for element in parser.read_events():
                yield element
            break
        parser.feed(chunk)
Pulls HTML from the source generator, feeds it to the parser and yields DOM elements.
13,013
def date_to_long_form_string(dt, locale_='en_US.utf8'):
    """Format *dt* (a datetime.date) as e.g. 'Thursday January 02 2020'.

    When *locale_* is truthy, formatting happens under that locale and the
    previous locale is restored afterwards.

    BUGFIX: the original did not restore the locale if strftime raised;
    the restore now lives in a ``finally`` block.
    """
    if not locale_:
        return dt.strftime("%A %B %d %Y")
    old_locale = locale.getlocale()
    locale.setlocale(locale.LC_ALL, locale_)
    try:
        return dt.strftime("%A %B %d %Y")
    finally:
        locale.setlocale(locale.LC_ALL, old_locale)
dt should be a datetime . date object .
13,014
def static_get_pdb_object(pdb_id, bio_cache=None, cache_dir=None):
    """Return a PDB object for *pdb_id*.

    Resolution order: bio_cache, then a file in cache_dir, then a fresh
    download (which is written back to cache_dir when one is given).
    """
    pdb_id = pdb_id.upper()
    if bio_cache:
        return bio_cache.get_pdb_object(pdb_id)
    if cache_dir:
        cached_path = os.path.join(cache_dir, '{0}.pdb'.format(pdb_id))
        if os.path.exists(cached_path):
            return PDB.from_filepath(cached_path)
    pdb_contents = retrieve_pdb(pdb_id)
    if cache_dir:
        # Populate the cache for the next lookup.
        write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), pdb_contents)
    return PDB(pdb_contents)
This method does not necessarily use a BioCache but it seems to fit here .
13,015
def rebuild_app(app_name, quiet=False, force=True, without_exec=False, restart=False):
    """Rebuild a cozy app: git pull, optionally wipe npm state, npm install,
    and optionally update/restart it via cozy-monitor.

    Fix applied: Python 3 print functions (were Python 2 print statements).
    """
    user = 'cozy-{app_name}'.format(app_name=app_name)
    home = '{prefix}/{app_name}'.format(prefix=PREFIX, app_name=app_name)
    command_line = 'cd {home}'.format(home=home)
    command_line += ' && git pull'
    if force:
        # Force a clean rebuild by removing all cached npm state first.
        command_line += ' && ([ -d node_modules ] && rm -rf node_modules || true)'
        command_line += ' && ([ -d .node-gyp ] && rm -rf .node-gyp || true)'
        command_line += ' && ([ -d .npm ] && rm -rf .npm || true)'
    command_line += ' && chown -R {user}:{user} .'.format(user=user)
    command_line += (' && sudo -u {user} env HOME={home} npm install'
                     ' --production'.format(user=user, home=home))
    if restart:
        command_line += ' && cozy-monitor update {app_name}'.format(app_name=app_name)
        command_line += ' && cozy-monitor restart {app_name}'.format(app_name=app_name)
    if not quiet:
        print('Execute:')
        print(command_line)
    if not without_exec:
        result = helpers.cmd_exec(command_line)
        print(result['stdout'])
        print(result['stderr'])
        print(result['error'])
Rebuild cozy apps with deletion of npm directory & new npm build
13,016
def rebuild_all_apps(force=True, restart=False):
    """Rebuild the npm tree of every installed cozy app."""
    for app_name in monitor.status(only_cozy=True).keys():
        rebuild_app(app_name, force=force, restart=restart)
Get all cozy apps & rebuild npm repository
13,017
def restart_stopped_apps():
    """Rebuild and start every cozy app currently in the 'down' state."""
    cozy_apps = monitor.status(only_cozy=True)
    for app, state in cozy_apps.items():
        if state == 'up':
            # BUGFIX: the original used the bare expression `next`, which is
            # a no-op (it just references the builtin); `continue` was meant.
            continue
        elif state == 'down':
            print('Start {}'.format(app))
            rebuild_app(app, force=False)
            monitor.start(app)
Restart all apps in stopped state
13,018
def migrate_2_node4():
    """Migrate an existing cozy installation to Node 4."""
    helpers.cmd_exec('npm install -g cozy-monitor cozy-controller', show_output=True)
    helpers.cmd_exec('update-cozy-stack', show_output=True)
    helpers.cmd_exec('update-all', show_output=True)
    helpers.cmd_exec('rm /etc/supervisor/conf.d/cozy-indexer.conf', show_output=True)
    helpers.cmd_exec('supervisorctl reload', show_output=True)
    helpers.wait_cozy_stack()
    ssl.normalize_cert_dir()
    # Switch the apt node source and reinstall node + cozy packages.
    for command in (
            'apt-get update',
            'echo "cozy cozy/nodejs_apt_list text " | debconf-set-selections',
            'apt-get install -y cozy-apt-node-list',
            'apt-get update',
            'apt-get remove -y nodejs-legacy',
            'apt-get remove -y nodejs-dev',
            'apt-get remove -y npm',
            'apt-get install -y nodejs',
            'apt-get install -y cozy',
            'npm install -g cozy-monitor cozy-controller'):
        helpers.cmd_exec(command, show_output=True)
    # Rebuild the core apps before restarting the controller.
    rebuild_app('data-system')
    rebuild_app('home')
    rebuild_app('proxy')
    helpers.cmd_exec('supervisorctl restart cozy-controller', show_output=True)
    helpers.wait_cozy_stack()
    rebuild_all_apps(restart=True)
    restart_stopped_apps()
    helpers.cmd_exec('apt-get install -y cozy', show_output=True)
Migrate existing cozy to node4
13,019
def install_requirements():
    """Install the system packages cozy depends on, then weboob."""
    helpers.cmd_exec('echo "cozy cozy/nodejs_apt_list text " | debconf-set-selections',
                     show_output=True)
    helpers.cmd_exec('apt-get install -y cozy-apt-node-list', show_output=True)
    helpers.cmd_exec('apt-get update', show_output=True)
    command_line = ('apt-get install -y nodejs'
                    ' && apt-get install -y cozy-depends')
    return_code = helpers.cmd_exec(command_line, show_output=True)
    if return_code != 0:
        # Abort the whole install when the package step fails.
        sys.exit(return_code)
    weboob.install()
Install cozy requirements
13,020
def add_message(self, text, type=None):
    """Append a message (with optional *type*) to the session and persist it."""
    msg_key = self._msg_key
    # setdefault returns the stored list, so we can append in one step.
    self.setdefault(msg_key, []).append(message(type, text))
    self.save()
Add a message with an optional type .
13,021
def pop_messages(self, type=None):
    """Remove and return stored messages, optionally only those of *type*."""
    key = self._msg_key
    if type is None:
        messages = self.pop(key, [])
    else:
        messages, keep_messages = [], []
        for msg in self.get(key, []):
            (messages if msg.type == type else keep_messages).append(msg)
        if not keep_messages and key in self:
            del self[key]
        else:
            self[key] = keep_messages
    if messages:
        # Only touch storage when something was actually removed.
        self.save()
    return messages
Retrieve stored messages and remove them from the session .
13,022
def vote_random(candidates, votes, n_winners):
    """Pick up to *n_winners* random candidates; each winner gets score 0.0.

    *votes* is accepted for interface parity with the other voting methods
    but is ignored.
    """
    pool = list(candidates)
    shuffle(pool)
    winners = pool[:min(n_winners, len(pool))]
    return [(candidate, 0.0) for candidate in winners]
Select random winners from the candidates .
13,023
def vote_least_worst(candidates, votes, n_winners):
    """Select the least-worst artifacts: rank candidates by the minimum score
    any vote gave them, then return the top *n_winners* of that ranking."""
    worsts = {str(c): 100000000.0 for c in candidates}
    for vote in votes:
        for cand, score in vote:
            key = str(cand)
            if score < worsts[key]:
                worsts[key] = score
    ranked = sorted(worsts.items(), key=lambda kv: kv[1], reverse=True)
    best = ranked[:min(n_winners, len(candidates))]
    # Map the stringified keys back to the original candidate objects.
    result = []
    for key, score in best:
        for cand in candidates:
            if str(cand) == key:
                result.append((cand, score))
    return result
Select least worst artifact as the winner of the vote .
13,024
def vote_best(candidates, votes, n_winners):
    """Return the single artifact whose top evaluation is highest.

    Each vote is assumed sorted best-first, so vote[0] is that voter's top
    pick; *candidates* and *n_winners* are unused (kept for interface parity).
    """
    best = [votes[0][0]]
    for vote in votes[1:]:
        if vote[0][1] > best[0][1]:
            best = [vote[0]]
    return best
Select the artifact with the single best evaluation as the winner of the vote .
13,025
def _remove_zeros ( votes , fpl , cl , ranking ) : for v in votes : for r in v : if r not in fpl : v . remove ( r ) for c in cl : if c not in fpl : if c not in ranking : ranking . append ( ( c , 0 ) )
Remove zeros in IRV voting .
13,026
def _remove_last ( votes , fpl , cl , ranking ) : for v in votes : for r in v : if r == fpl [ - 1 ] : v . remove ( r ) for c in cl : if c == fpl [ - 1 ] : if c not in ranking : ranking . append ( ( c , len ( ranking ) + 1 ) )
Remove last candidate in IRV voting .
13,027
def vote_IRV(candidates, votes, n_winners):
    """Perform instant-runoff voting and return up to *n_winners*
    (candidate, rank) pairs, best first."""
    # Keep only the candidate from each (candidate, score) pair.
    votes = [[pair[0] for pair in vote] for vote in votes]
    count_first = lambda vs: Counter(vote[0] for vote in vs).most_common()
    cl = list(candidates)
    ranking = []
    first_prefs = count_first(votes)
    fpl = [entry[0] for entry in first_prefs]
    # Repeatedly eliminate the weakest candidate until one remains.
    while len(fpl) > 1:
        _remove_zeros(votes, fpl, cl, ranking)
        _remove_last(votes, fpl, cl, ranking)
        cl = fpl[:-1]
        first_prefs = count_first(votes)
        fpl = [entry[0] for entry in first_prefs]
    ranking.append((fpl[0], len(ranking) + 1))
    ranking = list(reversed(ranking))
    return ranking[:min(n_winners, len(ranking))]
Perform IRV voting based on votes .
13,028
def vote_mean(candidates, votes, n_winners):
    """Rank candidates by the mean of their received scores and return the
    top *n_winners* as (candidate, mean_score) pairs."""
    scores = {str(candidate): [] for candidate in candidates}
    for vote in votes:
        for candidate, score in vote:
            scores[str(candidate)].append(score)
    # Insertion order of `scores` is preserved, keeping ties deterministic.
    means = {key: sum(values) / len(values) for key, values in scores.items()}
    ordering = sorted(means.items(), key=operator.itemgetter(1), reverse=True)
    best = ordering[:min(n_winners, len(ordering))]
    result = []
    for key, mean_score in best:
        for candidate in candidates:
            if str(candidate) == key:
                result.append((candidate, mean_score))
    return result
Perform mean voting based on votes .
13,029
def vote(self, candidates):
    """Rank *candidates* best-first by this agent's own evaluation."""
    ranks = [(candidate, self.evaluate(candidate)[0]) for candidate in candidates]
    return sorted(ranks, key=operator.itemgetter(1), reverse=True)
Rank artifact candidates .
13,030
def add_candidate(self, artifact):
    """Append *artifact* to the current candidate list and log the addition."""
    self.candidates.append(artifact)
    self._log(logging.DEBUG, "CANDIDATES appended:'{}'".format(artifact))
Add candidate artifact to the list of current candidates .
13,031
def validate_candidates(self, candidates):
    """Return the candidates that every agent in the environment accepts."""
    valid = set(candidates)
    for agent in self.get_agents(addr=False):
        # Keep only candidates every agent so far has validated.
        valid = valid.intersection(set(agent.validate(candidates)))
    return list(valid)
Validate the candidate artifacts with the agents in the environment .
13,032
def gather_votes(self, candidates):
    """Collect each agent's ranking of *candidates* and return them as a list."""
    return [agent.vote(candidates) for agent in self.get_agents(addr=False)]
Gather votes for the given candidates from the agents in the environment .
13,033
def get_managers(self):
    """Return the slave-environment managers (lazily cached); None in
    single-environment mode."""
    if self._single_env:
        return None
    if not hasattr(self, '_managers'):
        # Fetch once and memoize on the instance.
        self._managers = self.env.get_slave_managers()
    return self._managers
Get managers for the slave environments .
13,034
def gather_votes(self):
    """Gather votes for the current candidates from all slave environments
    (or the single local environment) into ``self._votes``."""
    async def slave_task(addr, candidates):
        r_manager = await self.env.connect(addr)
        return await r_manager.gather_votes(candidates)

    if len(self.candidates) == 0:
        self._log(logging.DEBUG, "Could not gather votes because there "
                  "are no candidates!")
        self._votes = []
        return
    self._log(logging.DEBUG,
              "Gathering votes for {} candidates.".format(len(self.candidates)))
    if self._single_env:
        self._votes = self.env.gather_votes(self.candidates)
    else:
        managers = self.get_managers()
        self._votes = run(create_tasks(slave_task, managers, self.candidates))
Gather votes from all the underlying slave environments for the current list of candidates .
13,035
def gather_candidates(self):
    """Collect candidates from the slave environments into ``self._candidates``."""
    async def slave_task(addr):
        r_manager = await self.env.connect(addr)
        return await r_manager.get_candidates()

    if self._single_env:
        self._candidates = self.env.candidates
    else:
        managers = self.get_managers()
        self._candidates = run(create_tasks(slave_task, managers))
Gather candidates from the slave environments .
13,036
def clear_candidates(self, clear_env=True):
    """Reset the candidate list; optionally clear the underlying
    environment(s) as well."""
    async def slave_task(addr):
        r_manager = await self.env.connect(addr)
        return await r_manager.clear_candidates()

    self._candidates = []
    if not clear_env:
        return
    if self._single_env:
        self.env.clear_candidates()
    else:
        run(create_tasks(slave_task, self.get_managers()))
Clear the current candidates .
13,037
def validate_candidates(self):
    """Validate the current candidates against every (slave) environment and
    keep only those accepted everywhere."""
    async def slave_task(addr, candidates):
        r_manager = await self.env.connect(addr)
        return await r_manager.validate_candidates(candidates)

    self._log(logging.DEBUG,
              "Validating {} candidates".format(len(self.candidates)))
    candidates = self.candidates
    if self._single_env:
        self._candidates = self.env.validate_candidates(candidates)
    else:
        tasks = create_tasks(slave_task, self.get_managers(), candidates,
                             flatten=False)
        returned_sets = run(tasks)
        valid = set(self.candidates)
        for accepted in returned_sets:
            valid = valid.intersection(set(accepted))
        self._candidates = list(valid)
    self._log(logging.DEBUG,
              "{} candidates after validation".format(len(self.candidates)))
Validate current candidates .
13,038
def gather_and_vote(self, voting_method, validate=False, winners=1, **kwargs):
    """Convenience wrapper: gather candidates (optionally validating them),
    gather votes, and compute the voting results."""
    self.gather_candidates()
    if validate:
        self.validate_candidates()
    self.gather_votes()
    return self.compute_results(voting_method, self.votes,
                                winners=winners, **kwargs)
Convenience function to gathering candidates and votes and performing voting using them .
13,039
def start_device(name, frontend, backend):
    """Start the device called *name* from the ``devices`` module with the
    given frontend/backend endpoints."""
    device_cls = getattr(devices, name)
    device_cls(frontend, backend)
Start specified device
13,040
def start(self, transaction_context=None):
    """Broadcast the transaction context, then the START message, to all turrets."""
    context_cmd = {
        'command': 'set_transaction_context',
        'msg': transaction_context or {},
    }
    self.publish(context_cmd)
    self.publish(self.START)
Publish start message to all turrets
13,041
def process_message(self, message, is_started=False):
    """Register or update a turret from an incoming status message.

    Returns False when this node is not the master or the message carries
    no status; True otherwise.
    """
    if not self.master:
        return False
    if 'status' not in message:
        return False
    # Turret messages carry their name under the 'turret' key; normalize it.
    message['name'] = message.pop('turret')
    if not self.add(message, is_started):
        return self.update(message)
    return True
Process incoming message from turret.
13,042
def add(self, turret_data, is_started=False):
    """Register a new turret; return False when its uuid is already known."""
    if turret_data.get('uuid') in self.turrets:
        return False
    turret = Turret(**turret_data)
    self.write(turret)
    self.turrets[turret.uuid] = turret
    if is_started:
        # The session is already running: tell the newcomer to start now.
        self.publish(self.START, turret.uuid)
    return True
Add a turret object to current turrets configuration
13,043
def update(self, turret_data):
    """Update a known turret from *turret_data*; return False for unknown uuids."""
    uuid = turret_data.get('uuid')
    if uuid not in self.turrets:
        return False
    turret = self.turrets[uuid]
    turret.update(**turret_data)
    self.write(turret)
    return True
Update a given turret
13,044
def publish(self, message, channel=None):
    """Publish *message* (JSON-encoded) on *channel* for all turrets.

    No-op unless this node is the master.
    """
    if not self.master:
        return
    payload = json.dumps(message)
    self.publisher.send_string("%s %s" % (channel or '', payload))
Publish a message for all turrets
13,045
def open_recruitment(self, n=1):
    """Open recruitment for the first HIT unless the experiment already started.

    Fix applied: Python 3 print function (was a Python 2 print statement).
    """
    from psiturk.amt_services import MTurkServices, RDSServices
    from psiturk.psiturk_shell import PsiturkNetworkShell
    from psiturk.psiturk_org_services import PsiturkOrgServices
    psiturk_access_key_id = os.getenv(
        "psiturk_access_key_id",
        self.config.get("psiTurk Access", "psiturk_access_key_id"))
    psiturk_secret_access_id = os.getenv(
        "psiturk_secret_access_id",
        self.config.get("psiTurk Access", "psiturk_secret_access_id"))
    web_services = PsiturkOrgServices(psiturk_access_key_id,
                                      psiturk_secret_access_id)
    aws_rds_services = RDSServices(
        self.aws_access_key_id, self.aws_secret_access_key, self.aws_region)
    sandbox = self.config.getboolean('Shell Parameters',
                                     'launch_in_sandbox_mode')
    self.amt_services = MTurkServices(
        self.aws_access_key_id, self.aws_secret_access_key, sandbox)
    self.shell = PsiturkNetworkShell(
        self.config, self.amt_services, aws_rds_services, web_services,
        self.server, sandbox)
    try:
        # Recruitment may only be (re)opened before anyone has participated;
        # the assert routes the "no participants yet" case to hit_create.
        participants = Participant.query.all()
        assert participants
    except Exception:
        self.shell.hit_create(
            n,
            self.config.get('HIT Configuration', 'base_payment'),
            self.config.get('HIT Configuration', 'duration'))
    else:
        print("Reject recruitment reopening: experiment has started.")
Open recruitment for the first HIT unless it is already open.
13,046
def approve_hit(self, assignment_id):
    """Approve the worker's assignment on MTurk and return the service result."""
    from psiturk.amt_services import MTurkServices
    sandbox = self.config.getboolean('Shell Parameters',
                                     'launch_in_sandbox_mode')
    self.amt_services = MTurkServices(
        self.aws_access_key_id, self.aws_secret_access_key, sandbox)
    return self.amt_services.approve_worker(assignment_id)
Approve the HIT .
13,047
def reward_bonus(self, assignment_id, amount, reason):
    """Pay the Turker a bonus of *amount* with the given *reason*."""
    from psiturk.amt_services import MTurkServices
    sandbox = self.config.getboolean('Shell Parameters',
                                     'launch_in_sandbox_mode')
    self.amt_services = MTurkServices(
        self.aws_access_key_id, self.aws_secret_access_key, sandbox)
    return self.amt_services.bonus_worker(assignment_id, amount, reason)
Reward the Turker with a bonus .
13,048
def keys(cls):
    """Return (and cache per class) the names of all declared columns."""
    if cls._cache_keys is None:
        cls._cache_keys = [column.name for column in cls.__table__._columns]
    return cls._cache_keys
return list of all declared columns .
13,049
def random(cls, engine_or_session, limit=5):
    """Return up to *limit* ORM instances in random database order."""
    session, auto_close = ensure_session(engine_or_session)
    rows = session.query(cls).order_by(func.random()).limit(limit).all()
    if auto_close:
        # Only close sessions this call created itself.
        session.close()
    return rows
Return random ORM instance .
13,050
def main():
    """Read floating point data from command line args or stdin and print a
    sparkline. Requires at least 2 data points as input."""
    import argparse
    from pkg_resources import require
    parser = argparse.ArgumentParser(description=main.__doc__)
    parser.add_argument("data", nargs=argparse.REMAINDER,
                        help="Floating point data, any delimiter.")
    parser.add_argument("--version", "-v", action="store_true",
                        help="Display the version number and exit.")
    args = parser.parse_args()
    if args.version:
        version = require("pysparklines")[0].version
        print(version)
        sys.exit(0)
    if os.isatty(0) and not args.data:
        # Interactive terminal with no args: nothing to read, show usage.
        parser.print_help()
        sys.exit(1)
    elif args.data:
        arg_string = u' '.join(args.data)
    else:
        arg_string = sys.stdin.read()
    try:
        output = sparkify(guess_series(arg_string))
    except Exception:
        # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        sys.stderr.write("Could not convert input data to valid sparkline\n")
        sys.exit(1)
    # NOTE(review): printing the encoded bytes is Python 2 behavior; on
    # Python 3 this prints a bytes repr — confirm the target runtime.
    print(output.encode('utf-8', 'ignore'))
u Reads from command line args or stdin and prints a sparkline from the data . Requires at least 2 data points as input .
13,051
def is_active(self, state: 'State') -> bool:
    """Return True if the multiplex is active in *state*, False otherwise.

    Results are memoized per relevant sub-state.
    """
    sub_state = state.sub_state_by_gene_name(*self.expression.variables)
    if sub_state not in self._is_active:
        # Evaluate the boolean expression once per distinct sub-state.
        params = self._transform_state_to_dict(sub_state)
        self._is_active[sub_state] = self.expression.evaluate(**params)
    return self._is_active[sub_state]
Return True if the multiplex is active in the given state false otherwise .
13,052
def get(self, key):
    """Deep-get *key* from ``self.registrar`` using dot notation.

    Returns None when any path segment is missing or not subscriptable.

    BUGFIX: the bare ``except:`` was narrowed to the lookup failures a
    missing/mismatched path can actually raise.
    """
    value = self.registrar
    try:
        for part in key.split('.'):
            value = value[part]
    except (KeyError, IndexError, TypeError):
        return None
    return value
Function deeply gets the key with . notation
13,053
def set(self, key, value):
    """Deep-set *key* (dot notation) to *value* in ``self.registrar``,
    creating intermediate dicts as needed."""
    parts = key.split('.')
    target = self.registrar
    for part in parts[:-1]:
        target = target.setdefault(part, dict())
    target[parts[-1]] = value
Function deeply sets the key with . notation
13,054
def boot(cls, *args, **kwargs):
    """Instantiate the accessor once with the given positional and keyword
    arguments; no-op if already booted or no accessor is configured."""
    if cls.accessor is not None and cls.instance is None:
        cls.instance = cls.accessor(*args, **kwargs)
Function creates the instance of accessor with dynamic positional & keyword arguments .
13,055
def register(cls, config={}):
    """Shortcut of boot() for accessors that take only a config dict.

    The mutable default is safe here: *config* is only passed through,
    never mutated.
    """
    if cls.accessor is not None and cls.instance is None:
        cls.instance = cls.accessor(config)
This function is basically a shortcut of boot for accessors that have only the config dict argument .
13,056
def get_complex(self):
    """Return this complex definition as a dict ready for database storage."""
    record = dict(
        LName=self.lname,
        LShortName=self.lshortname,
        LHTMLName=self.lhtmlname,
        RName=self.rname,
        RShortName=self.rshortname,
        RHTMLName=self.rhtmlname,
        FunctionalClassID=self.functional_class_id,
        PPDBMFunctionalClassID=self.functional_class_id_ppdbm,
        PPDBMDifficulty=self.difficulty_ppdbm,
        IsWildType=self.is_wildtype,
        WildTypeComplexID=self.wildtype_complex,
        Notes=self.notes,
        Warnings=self.warnings,
    )
    # Only include the primary key once it has been assigned (truthy).
    if self.id:
        record['ID'] = self.id
    return record
Returns the record for the complex definition to be used for database storage .
13,057
def get_pdb_sets(self):
    """Return records for database storage of this complex's PDB sets.

    Only valid once ``self.id`` is set. Each element pairs one pdb_set
    record with its chain records.

    BUGFIX: ``dict.iteritems()`` (Python 2 only) replaced with ``items()``.
    """
    assert self.id is not None
    data = []
    for pdb_set in self.pdb_sets:
        pdb_set_record = dict(
            PPComplexID=self.id,
            SetNumber=pdb_set['set_number'],
            IsComplex=pdb_set['is_complex'],
            Notes=pdb_set['notes'],
        )
        chain_records = []
        # Sort by side so the chain ordering is deterministic.
        for side, chain_details in sorted(pdb_set['chains'].items()):
            chain_records.append(dict(
                PPComplexID=self.id,
                SetNumber=pdb_set['set_number'],
                Side=side,
                ChainIndex=chain_details['chain_index'],
                PDBFileID=chain_details['pdb_file_id'],
                Chain=chain_details['chain_id'],
                NMRModel=chain_details['nmr_model'],
            ))
        data.append(dict(pdb_set=pdb_set_record, chain_records=chain_records))
    return data
Return a record to be used for database storage . This only makes sense if self . id is set . See usage example above .
13,058
def parse_singular_float(t, tag_name):
    """Parse the sole floating point value inside the single <tag_name>
    child of *t*. Heavy-handed with the asserts by design."""
    matches = t.getElementsByTagName(tag_name)
    assert len(matches) == 1
    node = matches[0]
    assert len(node.childNodes) == 1
    return float(node.childNodes[0].data)
Parses the sole floating point value with name tag_name in tag t . Heavy - handed with the asserts .
13,059
def parse_singular_int(t, tag_name):
    """Parse the sole integer value inside the single <tag_name> child of *t*.

    Note: ``isdigit`` means only non-negative integers are accepted.
    """
    matches = t.getElementsByTagName(tag_name)
    assert len(matches) == 1
    node = matches[0]
    assert len(node.childNodes) == 1
    raw = node.childNodes[0].data
    assert raw.isdigit()
    return int(raw)
Parses the sole integer value with name tag_name in tag t . Heavy - handed with the asserts .
13,060
def parse_singular_alphabetic_character(t, tag_name):
    """Parse the sole single-character alphabetic value inside the single
    <tag_name> child of *t*. Heavy-handed with the asserts by design.

    BUGFIX: the original asserted ``'v' <= 'z'`` — a comparison between two
    string literals that is always True — instead of bounding *v* above.
    The intended range check ``'A' <= v <= 'z'`` is restored.
    """
    matches = t.getElementsByTagName(tag_name)
    assert len(matches) == 1
    node = matches[0]
    assert len(node.childNodes) == 1
    v = node.childNodes[0].data
    assert len(v) == 1 and 'A' <= v <= 'z'
    return v
Parses the sole alphabetic character value with name tag_name in tag t . Heavy - handed with the asserts .
13,061
def parse_singular_string(t, tag_name):
    """Parse the sole string value inside the single <tag_name> child of *t*.
    Heavy-handed with the asserts by design."""
    matches = t.getElementsByTagName(tag_name)
    assert len(matches) == 1
    node = matches[0]
    assert len(node.childNodes) == 1
    return node.childNodes[0].data
Parses the sole string value with name tag_name in tag t . Heavy - handed with the asserts .
13,062
def timer():
    """Return a timestamp (float seconds) for measuring elapsed time.

    BUGFIX: ``time.clock`` was removed in Python 3.8; ``time.perf_counter``
    is its documented replacement on Windows. The non-Windows branch keeps
    ``time.time`` for backward-compatible epoch timestamps.
    """
    if sys.platform == "win32":
        default_timer = time.perf_counter
    else:
        default_timer = time.time
    return default_timer()
Timer used for calculate time elapsed
13,063
def roulette(weights, n):
    """Choose *n* distinct items at random, with probability proportional
    to their weight.

    *weights* maps item -> positive weight. Raises Exception when *n*
    exceeds the number of items or any weight is non-positive.
    """
    if n > len(weights):
        raise Exception("Can't choose {} samples from {} items".format(n, len(weights)))
    if any(map(lambda w: w <= 0, weights.values())):
        raise Exception("The weight can't be a non-positive number.")
    items = list(weights.items())
    chosen = set()
    for _ in range(n):
        # Spin the wheel over the remaining items.
        total = sum(weight for _, weight in items)
        dice = random.random() * total
        running_weight = 0
        chosen_item = None
        for item, weight in items:
            if dice < running_weight + weight:
                chosen_item = item
                break
            running_weight += weight
        chosen.add(chosen_item)
        # Sampling without replacement: drop the winner before respinning.
        items = [(i, w) for (i, w) in items if i != chosen_item]
    return list(chosen)
Choose randomly the given number of items . The probability the item is chosen is proportionate to its weight .
13,064
def as_dict(self):
    """Return a shallow copy of the URI object's attributes as a dictionary."""
    return dict(self.__dict__)
Return the URI object as a dictionary
13,065
def get_title(self, group=None):
    """Return the plugin title, suffixed with the group's comment count
    when that count is non-zero."""
    title = super(CommentsPlugin, self).get_title()
    if group is not None:
        count = GroupComments.objects.filter(group=group).count()
    else:
        count = None
    if count:
        title = u'%s (%d)' % (title, count)
    return title
Adds number of comments to title .
13,066
def view(self, request, group, **kwargs):
    """Display the comment thread for *group* and handle new-comment POSTs.

    POST requests store the (stripped) message, optionally send mail and/or
    resolve the group, then redirect back (post/redirect/get).
    """
    if request.method == 'POST':
        message = request.POST.get('message')
        if message is not None and message.strip():
            comment = GroupComments(group=group, author=request.user,
                                    message=message.strip())
            comment.save()
            msg = _(u'Comment added.')
            if request.POST.get('sendmail', ''):
                self._send_mail(comment, group)
            if 'postresolve' in request.POST:
                self._resolve_group(request, group)
                msg = _(u'Comment added and event marked as resolved.')
            messages.success(request, msg)
        return HttpResponseRedirect(request.path)
    query = GroupComments.objects.filter(group=group).order_by('-created')
    return self.render('sentry_comments/index.html', {
        'comments': query,
        'group': group,
    })
Display and store comments .
13,067
def generate_graphs(data, name, results_dir):
    """Render the response-time and throughput graphs for one dataset."""
    graphs.resp_graph_raw(data['raw'], name + '_response_times.svg', results_dir)
    graphs.resp_graph(data['compiled'], name + '_response_times_intervals.svg',
                      results_dir)
    graphs.tp_graph(data['compiled'], name + '_throughput.svg', results_dir)
Generate all reports from original dataframe
13,068
def print_infos(results):
    """Print a short summary of the test results to standard output."""
    summary = (
        ('transactions: %i', results.total_transactions),
        ('timers: %i', results.total_timers),
        ('errors: %i', results.total_errors),
        ('test start: %s', results.start_datetime),
        ('test finish: %s\n', results.finish_datetime),
    )
    for fmt, value in summary:
        print(fmt % value)
Print information to standard output .
13,069
def write_template(data, results_dir, parent):
    """Render the Jinja2 report template with *data* and write the HTML report."""
    print("Generating html report...")
    started = time.time()
    templates_dir = os.path.join(results_dir, parent, 'templates')
    env = Environment(loader=FileSystemLoader(templates_dir))
    html = env.get_template('report.html').render(data)
    ReportWriter(results_dir, parent).write_report(html)
    print("HTML report generated in {} seconds\n".format(time.time() - started))
Write the html template
13,070
def output(results_dir, config, parent='../../'):
    """Compile the results of a test run and write the full HTML report.

    :param results_dir: directory holding the raw results (made absolute here).
    :param config: dict with at least 'run_time' and 'results_ts_interval'.
    :param parent: relative path from results_dir to the templates directory.
    :returns: True on success, False when there were no transactions to report.
    """
    start = time.time()
    print("Compiling results...")
    results_dir = os.path.abspath(results_dir)
    results = ReportResults(config['run_time'], config['results_ts_interval'])
    results.compile_results()
    print("Results compiled in {} seconds\n".format(time.time() - start))
    # Nothing recorded: bail out before building graphs/templates.
    if results.total_transactions == 0:
        print("No results, cannot create report")
        return False
    print_infos(results)
    # Context passed to the HTML template.
    data = {'report': results,
            'run_time': config['run_time'],
            'ts_interval': config['results_ts_interval'],
            'turrets_config': results.turrets,
            'results': {"all": results.main_results,
                        "timers": results.timers_results}}
    print("Generating graphs...")
    partial = time.time()
    # Aggregate graphs first, then one set per named timer.
    generate_graphs(results.main_results, 'All_Transactions', results_dir)
    for key, value in results.timers_results.items():
        generate_graphs(value, key, results_dir)
    print("All graphs generated in {} seconds\n".format(time.time() - partial))
    write_template(data, results_dir, parent)
    print("Full report generated in {} seconds".format(time.time() - start))
    return True
Write the results output for the given test
13,071
def safeMkdir(p, permissions=permissions755):
    """Create directory *p* if it does not exist, then set its permissions.

    Unlike a bare os.mkdir this does not raise when the directory is
    already present.
    """
    try:
        os.mkdir(p)
    except OSError:
        # Directory already exists (or mkdir failed); chmod below still runs.
        pass
    os.chmod(p, permissions)
Wrapper around os . mkdir which does not raise an error if the directory exists .
13,072
def install_dependencies(feature=None):
    """Install pip dependencies for a named feature and merge them into requirements.txt.

    With no *feature*, lists the available feature requirement files and
    returns.  Otherwise installs ``_requirements/<feature>.txt`` via pip and
    appends any packages not already present to the project requirements.txt.
    """
    import subprocess
    echo(green('\nInstall dependencies:'))
    echo(green('-' * 40))
    # Feature requirement files live next to this module in _requirements/.
    req_path = os.path.realpath(os.path.dirname(__file__) + '/../_requirements')
    if not feature:
        # No feature given: print the menu of available features and stop.
        echo(yellow('Please specify a feature to install. \n'))
        for index, item in enumerate(os.listdir(req_path)):
            item = item.replace('.txt', '')
            echo(green('{}. {}'.format(index + 1, item)))
        echo()
        return
    feature_file = feature.lower() + '.txt'
    feature_reqs = os.path.join(req_path, feature_file)
    if not os.path.isfile(feature_reqs):
        msg = 'Unable to locate feature requirements file [{}]'
        echo(red(msg.format(feature_file)) + '\n')
        return
    msg = 'Now installing dependencies for "{}" feature...'.format(feature)
    echo(yellow(msg))
    # Install with the same interpreter that is running this script.
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', feature_reqs])
    reqs = os.path.join(os.getcwd(), 'requirements.txt')
    if os.path.exists(reqs):
        # Compare by package name only (strip pinned '==version' suffixes).
        with open(reqs) as file:
            existing = [x.strip().split('==')[0] for x in file.readlines() if x]
        lines = ['\n']
        with open(feature_reqs) as file:
            incoming = file.readlines()
        for line in incoming:
            # Keep comments/blank lines verbatim; append only new packages.
            if not (len(line)) or line.startswith('#'):
                lines.append(line)
                continue
            package = line.strip().split('==')[0]
            if package not in existing:
                lines.append(line)
        with open(reqs, 'a') as file:
            file.writelines(lines)
    echo(green('DONE\n'))
Install dependencies for a feature
13,073
def _post(self, xml_query):
    """POST *xml_query* to the RCSB PDB search endpoint and return the stripped body."""
    request = urllib2.Request(url='http://www.rcsb.org/pdb/rest/search',
                              data=xml_query)
    response = urllib2.urlopen(request)
    return response.read().strip()
POST the request .
13,074
def simple_hashstring(obj, bits=64):
    """Return a brief urlsafe-base64 hash string of ``str(obj)``.

    :param bits: hash width; a multiple of 8, at most 64 (default 64).
    """
    # Keep only the `bits` high-order bits of the 64-bit murmur hash.
    basis = mmh3.hash64(str(obj))[0] >> (64 - bits)
    raw_hash = struct.pack('!q', basis)
    if bits != 64:
        # Drop the low-order bytes that the shift zeroed out.
        raw_hash = raw_hash[:-int((64 - bits) / 8)]
    return base64.urlsafe_b64encode(raw_hash).rstrip(b"=").decode('ascii')
Creates a simple hash in brief string form from obj bits is an optional bit width defaulting to 64 and should be in multiples of 8 with a maximum of 64
13,075
def create_slug(title, plain_len=None):
    """Derive a slug from *title*, balancing collision risk against readability.

    :param plain_len: if truthy, truncate the title to this many chars first.
    """
    text = title[:plain_len] if plain_len else title
    # Replace disallowed characters, then collapse repeated underscores.
    underscored = OMIT_FROM_SLUG_PAT.sub('_', text).lower()
    return NORMALIZE_UNDERSCORES_PAT.sub('_', underscored)
Tries to create a slug from a title trading off collision risk with readability and minimized cruft
13,076
def profiling_query_formatter(view, context, query_document, name):
    """Format a ProfilingQuery entry as an HTML row for the admin detail view."""
    return Markup(''.join([
        '<div class="pymongo-query row">',
        '<div class="col-md-1">',
        # Link to the query's admin page; wraps the command-name badge.
        '<a href="{}">'.format(query_document.get_admin_url(_external=True)),
        mongo_command_name_formatter(view, context, query_document, 'command_name'),
        '</div>',
        '<div class="col-md-10">',
        # Pretty-printed command body in a <pre> block.
        profiling_pure_query_formatter(None, None, query_document, 'command', tag='pre'),
        '</div>',
        '<div class="col-md-1">',
        '<small>{} ms</small>'.format(query_document.duration),
        # NOTE(review): this </a> closes the anchor opened in the first
        # column, so the tags straddle the column <div>s — looks like
        # mis-nested markup; confirm the intended anchor extent.
        '</a>',
        '</div>',
        '</div>',
    ]))
Format a ProfilingQuery entry for a ProfilingRequest detail field
13,077
def make_response(obj):
    """Coerce *obj* into a Response; Response instances pass through unchanged.

    :raises TypeError: if *obj* is None (handlers must return something).
    """
    if obj is None:
        raise TypeError("Handler return value cannot be None.")
    return obj if isinstance(obj, Response) else Response(200, body=obj)
Try to coerce an object into a Response object .
13,078
def resolve_handler(request, view_handlers):
    """Select a suitable handler for *request* via view, method and content negotiation.

    :returns: ``(handler, vary)`` where *vary* is the set of header names the
        choice depended on ('Accept' and/or 'Content-Type').
    :raises NotFound: unknown view.
    :raises MethodNotAllowed: verb not handled (with an Allow header value).
    :raises UnsupportedMediaType: no handler accepts the request body type.
    :raises NotAcceptable: no handler can satisfy the Accept header.
    """
    # Extract the optional view name from the matched route's name.
    view = None
    if request._context:
        route_name = request._context[-1].route.name
        if route_name and VIEW_SEPARATOR in route_name:
            view = route_name.split(VIEW_SEPARATOR, 1)[1] or None
    if view not in view_handlers:
        raise NotFound
    method_handlers = view_handlers[view]
    verb = request.method
    if verb not in method_handlers:
        # HEAD falls back to GET when no explicit HEAD handler exists.
        if verb == 'HEAD' and 'GET' in method_handlers:
            verb = 'GET'
        else:
            allowed_methods = set(method_handlers.keys())
            # Advertise implicit HEAD support alongside GET.
            if 'HEAD' not in allowed_methods and 'GET' in allowed_methods:
                allowed_methods.add('HEAD')
            allow = ', '.join(sorted(allowed_methods))
            raise MethodNotAllowed(allow=allow)
    handlers = method_handlers[verb]
    # Record which request headers influence the final choice (for caching).
    vary = set()
    if len(set(h.provides for h in handlers if h.provides is not None)) > 1:
        vary.add('Accept')
    if len(set(h.accepts for h in handlers)) > 1:
        vary.add('Content-Type')
    # Narrow by the request body's content type, then by the Accept header.
    content_type = request.content_type
    if content_type:
        handlers = negotiate_content_type(content_type, handlers)
        if not handlers:
            raise UnsupportedMediaType
    accept = request.headers.get('Accept')
    if accept:
        handlers = negotiate_accept(accept, handlers)
        if not handlers:
            raise NotAcceptable
    return handlers[0], vary
Select a suitable handler to handle the request .
13,079
def negotiate_content_type(content_type, handlers):
    """Return the handlers whose accepted media range best fits *content_type*.

    Returns an empty list when no handler's range matches at all.
    """
    # Score every handler's accepted media range against the content type.
    scored = sorted(
        ((mimeparse.fitness_and_quality_parsed(
            content_type, [mimeparse.parse_media_range(mr)]), mr)
         for mr in (h.accepts for h in handlers)),
        reverse=True)
    best_score = scored[0][0]
    if best_score == MIMEPARSE_NO_MATCH or not best_score[1]:
        return []
    # First entry after the descending sort carries the winning range.
    best_range = next(mr for score, mr in scored if score == best_score)
    return [h for h in handlers if h.accepts == best_range]
Filter handlers that accept a given content - type .
13,080
def negotiate_accept(accept, handlers):
    """Return the handlers providing the mime-type that best satisfies *accept*."""
    provided = [h.provides for h in handlers]
    if None in provided:
        # Handlers with no declared mime-type match any Accept header.
        return [h for h in handlers if h.provides is None]
    best = mimeparse.best_match(reversed(provided), accept)
    return [h for h in handlers if h.provides == best]
Filter handlers that provide an acceptable mime - type .
13,081
def get(self, key, default=None, type=None):
    """Return the first value for *key*, optionally coerced by *type*.

    Falls back to *default* when the key is missing or coercion raises
    ValueError.
    """
    try:
        value = self[key]
        return value if type is None else type(value)
    except (KeyError, ValueError):
        return default
Returns the first value for a key .
13,082
def getall(self, key, type=None):
    """Return every value stored under *key*.

    With *type*, each value is coerced; values that raise ValueError are
    silently skipped.
    """
    if type is None:
        return [v for k, v in self._items if k == key]
    values = []
    for k, v in self._items:
        if k != key:
            continue
        try:
            values.append(type(v))
        except ValueError:
            # Unconvertible values are dropped, not replaced.
            pass
    return values
Return a list of values for the given key .
13,083
def url_for(*args, **kw):
    """Build the URL for a target route.

    Positional args are ``(self, target, *route_args)``; remaining keyword
    args become route parameters.  Special keywords: ``_query`` (dict or
    sequence of pairs appended as a query string) and ``_relative`` (return
    the path without joining onto the application URI).
    """
    # *args trick: self and target are peeled off the front so arbitrary
    # positional route arguments can follow.
    self, target, args = args[0], args[1], list(args[2:])
    query = kw.pop('_query', None)
    relative = kw.pop('_relative', False)
    url = build_url(self._context, target, args, kw)
    if query:
        # Sort dict items for a deterministic query-string order.
        if isinstance(query, dict):
            query = sorted(query.items())
        query_part = urllib.urlencode(query)
        query_sep = '&' if '?' in url else '?'
        url = url + query_sep + query_part
    if relative:
        return url
    else:
        return urlparse.urljoin(self.application_uri, url)
Build the URL for a target route .
13,084
def input(self):
    """Lazily build and cache a file-like object wrapping the WSGI request body."""
    if self._input is None:
        input_file = self.environ['wsgi.input']
        # NOTE(review): the original computed `self.content_length or 0` into a
        # local and then discarded it, passing self.content_length unchanged.
        # The dead local is removed; behavior is preserved. Confirm whether
        # WsgiInput should instead receive the 0-defaulted length.
        self._input = WsgiInput(input_file, self.content_length)
    return self._input
Returns a file - like object representing the request body .
13,085
def body(self):
    """Read the entire request body once and cache the result."""
    if self._body is None:
        reader = self._body_reader
        if reader is None:
            # Default: read exactly content_length bytes (0 when unset).
            self._body = self.input.read(self.content_length or 0)
        else:
            self._body = reader(self.input)
    return self._body
Reads and returns the entire request body .
13,086
def form(self):
    """Parse the request body as a web form and cache it as a QueryDict.

    NOTE(review): the ``.decode('utf-8')`` calls on field names/values are
    Python 2 byte-string handling; under Python 3 cgi returns str and this
    would fail — confirm target runtime.
    """
    if self._form is None:
        # Force FieldStorage to parse the body as a POST form, ignoring
        # any query string.
        environ = self.environ.copy()
        environ['QUERY_STRING'] = ''
        environ['REQUEST_METHOD'] = 'POST'
        fs = cgi.FieldStorage(fp=self.input, environ=environ,
                              keep_blank_values=True)
        fields = []
        for f in fs.list or []:
            if f.filename:
                # File upload: keep the FieldStorage object itself.
                f.filename = f.filename.decode('utf-8')
                fields.append((f.name.decode('utf-8'), f))
            else:
                fields.append((f.name.decode('utf-8'), f.value.decode('utf-8')))
        self._form = QueryDict(fields)
    return self._form
Reads the request body and tries to parse it as a web form .
13,087
def cookies(self):
    """Parse the Cookie header into a cached name -> value dict.

    NOTE(review): ``k.decode`` / ``.value.decode`` assume Python 2 byte
    strings from SimpleCookie; under Python 3 these are str — confirm
    target runtime.
    """
    if self._cookies is None:
        c = SimpleCookie(self.environ.get('HTTP_COOKIE'))
        self._cookies = dict([(k.decode('utf-8'), v.value.decode('utf-8'))
                              for k, v in c.items()])
    return self._cookies
Returns a dictionary mapping cookie names to their values .
13,088
def _format_parameter_error_message ( name : str , sig : Signature , num_params : int ) -> str : if num_params == 0 : plural = 's' missing = 2 arguments = "'slack' and 'event'" else : plural = '' missing = 1 arguments = "'event'" return ( f"{name}{sig} missing {missing} required positional " f"argument{plural}: {arguments}" )
Format an error message for missing positional arguments .
13,089
def connect_with_retry(self) -> None:
    """Connect to the Slack API, backing off between attempts.

    :raises FailedConnection: after self.retries unsuccessful attempts.
    """
    if self.is_connected():
        log.debug('Already connected to the Slack API')
        return
    for attempt in range(1, self.retries + 1):
        self.connect()
        if self.is_connected():
            log.debug('Connected to the Slack API')
            return
        delay = self.backoff(attempt)
        log.debug("Waiting %.3fs before retrying", delay)
        time.sleep(delay)
    raise FailedConnection('Failed to connect to the Slack API')
Attempt to connect to the Slack API . Retry on failures .
13,090
def fetch_events(self) -> List[dict]:
    """Poll the RTM connection for new events, reconnecting on timeout."""
    try:
        return self.inner.rtm_read()
    except TimeoutError:
        # Connection dropped: re-establish it and report no events this poll.
        log.debug('Lost connection to the Slack API, attempting to '
                  'reconnect')
        self.connect_with_retry()
        return []
Fetch new RTM events from the API .
13,091
def _ensure_slack(self, connector: Any, retries: int,
                  backoff: Callable[[int], float]) -> None:
    """Create and store the wrapped SlackClient.

    Falls back to the environment-variable connector when none is given.
    """
    if connector is None:
        connector = self._env_var
    slack: SlackClient = _create_slack(connector)
    self._slack = _SlackClientWrapper(
        slack=slack, retries=retries, backoff=backoff)
Ensure we have a SlackClient .
13,092
def run(self, *,
        connector: Union[EnvVar, Token, SlackClient, None] = None,
        interval: float = 0.5,
        retries: int = 16,
        backoff: Callable[[int], float] = None,
        until: Callable[[List[dict]], bool] = None) -> None:
    """Connect to the Slack API and run the event handler loop.

    :param connector: env var, token, or existing client (None -> env var).
    :param interval: seconds to sleep between polls.
    :param retries: connection attempts before giving up.
    :param backoff: retry-number -> delay function (default: truncated
        exponential).
    :param until: predicate over each event batch; the loop exits when it
        returns falsy (default: run forever).
    """
    backoff = backoff or _truncated_exponential
    until = until or _forever
    self._ensure_slack(connector=connector, retries=retries, backoff=backoff)
    assert self._slack is not None
    while True:
        events = self._slack.fetch_events()
        if not until(events):
            log.debug('Exiting event loop')
            break
        for event in events:
            type_ = event.get('type', '')
            # Dispatch to type-specific handlers plus wildcard handlers.
            for handler in self._handlers[type_] + self._handlers['*']:
                fn, kwargs = handler
                fn(self._slack.inner, event, **kwargs)
        time.sleep(interval)
Connect to the Slack API and run the event handler loop .
13,093
def install():
    """Install weboob system-wide by cloning its repository and running setup.py.

    Python 2 code (print statements).  Exits the process on any failure;
    on success runs update().
    """
    # Pick a /tmp/weboob dir that does not already exist (append '1's).
    tmp_weboob_dir = '/tmp/weboob'
    while (os.path.exists(tmp_weboob_dir)):
        tmp_weboob_dir += '1'
    print 'Fetching sources in temporary dir {}'.format(tmp_weboob_dir)
    result = cmd_exec('git clone {} {}'.format(WEBOOB_REPO, tmp_weboob_dir))
    if (result['error']):
        print result['stderr']
        print 'Weboob installation failed: could not clone repository'
        exit()
    print 'Sources fetched, will now process to installation'
    result = cmd_exec('cd {} && ./setup.py install'.format(tmp_weboob_dir))
    # The clone is no longer needed once setup.py has run.
    shutil.rmtree(tmp_weboob_dir)
    if (result['error']):
        print result['stderr']
        print 'Weboob installation failed: setup failed'
        exit()
    print result['stdout']
    # Sanity check: the installed module must report a version.
    weboob_version = get_weboob_version()
    if (not weboob_version):
        print 'Weboob installation failed: version not detected'
        exit()
    print 'Weboob (version: {}) installation succeeded'.format(weboob_version)
    update()
Install weboob system-wide .
13,094
def add_many(self, rels):
    """Add a list of relationships to the extent.

    Each element of *rels* may be a 3-tuple ``(origin, rel, target)`` or a
    4-tuple ``(origin, rel, target, attrs)``; otherwise ValueError is raised.
    """
    for curr_rel in rels:
        # Default attributes object, replaced if the tuple carries its own.
        attrs = self._attr_cls()
        if len(curr_rel) == 2:
            # NOTE(review): this branch unpacks FOUR values from
            # curr_rel[1], i.e. it expects a pair whose second element is
            # itself (origin, rel, target, attrs).  That looks inconsistent
            # with the other branches — confirm the intended 2-tuple shape.
            origin, rel, target, attrs = curr_rel[1]
        elif len(curr_rel) == 3:
            origin, rel, target = curr_rel
        elif len(curr_rel) == 4:
            origin, rel, target, attrs = curr_rel
        else:
            raise ValueError
        # The relation label must be non-empty.
        assert rel
        self.add(origin, rel, target, attrs)
    return
Add a list of relationships to the extent
13,095
def call_repeatedly(func, interval, *args, **kwargs):
    """Invoke *func(*args, **kwargs)* every *interval* seconds on a daemon thread.

    The loop stops when the returned stopper is called, when the main thread
    dies, or at interpreter exit.

    :returns: ``(thread, stopper)`` — the worker thread and a zero-argument
        callable that stops the loop.
    """
    stopped = threading.Event()
    main_thread = threading.current_thread()

    def _loop():
        # Event.wait doubles as both the inter-call sleep and the stop signal.
        while not stopped.wait(interval) and main_thread.is_alive():
            func(*args, **kwargs)

    worker = threading.Thread(target=_loop, daemon=True)
    worker.start()
    # Make sure the loop is stopped when the interpreter shuts down.
    atexit.register(stopped.set)
    return worker, stopped.set
Call a function repeatedly at the given interval . Returns both the thread object and the loop-stopper Event .
13,096
def execute(command, return_output=True, log_file=None, log_settings=None,
            error_logfile=None, timeout=None, line_function=None,
            poll_timing=0.01, logger=None, working_folder=None, env=None):
    """Execute a shell command, logging its standard output to a file.

    :param return_output: when True, return the captured output; otherwise
        return the process exit status.
    :param log_file: output log path (auto-generated under the log folder
        when None).
    :param log_settings: dict providing 'LOG_FOLDER'; a temp dir is used
        (and cleaned up) when absent.
    :param error_logfile: separate stderr log path (defaults to the same
        file as stdout).
    :param timeout: seconds before the process is SIGKILLed.
    :param line_function: optional callback invoked with each new output
        line while the process runs.
    :param poll_timing: polling sleep between liveness checks.
    :raises Exception: when the timeout is exceeded.
    """
    # Resolve the log folder: configured, or a throwaway temp dir.
    tmp_log = False
    if log_settings:
        log_folder = log_settings.get('LOG_FOLDER')
    else:
        tmp_log = True
        log_folder = tempfile.mkdtemp()
    if not log_file:
        log_file = os.path.join(log_folder, "commands",
                                "execute-command-logfile-%s.log" % UUID.uuid4())
    try:
        if not os.path.isdir(os.path.join(log_folder, "commands")):
            os.makedirs(os.path.join(log_folder, "commands"))
    except:
        # Best-effort: folder creation races are ignored.
        pass
    if not logger:
        logger = logging.getLogger('command_execute')
    # Writer appends; a separate reader tails the same file for output.
    logfile_writer = open(log_file, 'a')
    header = "%s - Executing command (timeout=%s) :\n\t%s\n\n\n" % (
        datetime.now().isoformat(), timeout, command)
    logfile_writer.write(header)
    logfile_writer.flush()
    logfile_reader = open(log_file, 'rb')
    # Remember where the command's own output starts (after the header).
    logfile_reader.seek(0, os.SEEK_END)
    logfile_start_position = logfile_reader.tell()
    if error_logfile:
        err_logfile_writer = open(error_logfile, 'a')
    else:
        err_logfile_writer = logfile_writer
    start = datetime.now()
    timeout_string = ""
    if timeout:
        timeout_string = "(timeout=%s)" % timeout
    logger.info(u"Executing command %s :\n\t\t%s" % (timeout_string, command))
    # 'exec' replaces the shell so process.pid is the command itself
    # (needed for the SIGKILL below); not applicable on Windows.
    if sys.platform != 'win32':
        command = u"exec %s" % text_utils.uni(command)
    process = subprocess.Popen(command, stdout=logfile_writer,
                               stderr=err_logfile_writer, bufsize=1,
                               shell=True, cwd=working_folder, env=env)
    # Poll until the process exits, enforcing the timeout and streaming
    # new lines to line_function as they appear in the log.
    while process.poll() == None:
        time.sleep(poll_timing)
        if timeout != None:
            now = datetime.now()
            if (now - start).seconds > timeout:
                os.kill(process.pid, signal.SIGKILL)
                os.waitpid(-1, os.WNOHANG)
                raise Exception(
                    "Command execution timed out (took more than %s seconds...)" % timeout)
        if line_function:
            o = text_utils.uni(logfile_reader.readline()).rstrip()
            while o != '':
                line_function(o)
                o = text_utils.uni(logfile_reader.readline()).rstrip()
    if not return_output:
        return process.wait()
    # Re-read everything the command wrote (from just after the header).
    logfile_reader.seek(logfile_start_position, os.SEEK_SET)
    res = text_utils.uni(logfile_reader.read())
    try:
        logfile_reader.close()
        logfile_writer.close()
        err_logfile_writer.close()
        if tmp_log:
            shutil.rmtree(log_folder, ignore_errors=True)
    except:
        logger.exception("Error while cleaning after tbx.execute() call.")
    return res
Execute a program and logs standard output into a file .
13,097
def save(self, role, commit=True):
    """Validate and persist a role model.

    Returns the validation result when the schema rejects the role;
    otherwise adds it to the session, optionally commits, emits the
    role-saved event and returns the role.
    """
    self.is_instance(role)
    valid = RoleSchema().process(role)
    if not valid:
        return valid
    db.session.add(role)
    if commit:
        db.session.commit()
    events.role_saved_event.send(role)
    return role
Persist role model
13,098
def update_phase(self, environment, data, prediction, user, item, correct,
                 time, answer_id, **kwargs):
    """Hook called after a prediction to update the environment.

    Intentionally a no-op in this base implementation; subclasses override
    it to persist model state.
    """
    pass
After the prediction update the environment and persist some information for the predictive model .
13,099
def append_flanking_markers(qtls_mk_file, flanking_markers):
    """Append LOD2-interval flanking markers as two extra columns of the QTL file.

    The header row gets the column titles; data rows get the markers keyed
    by column 3, or ``['NA', 'NA']`` when no markers were extracted.
    """
    matrix = read_input_file(qtls_mk_file, sep=',')
    output = []
    for index, row in enumerate(matrix):
        if index == 0:
            extra = ['LOD2 interval start', 'LOD2 interval end']
        else:
            extra = flanking_markers.get(row[3], ['NA', 'NA'])
        output.append(row + extra)
    # Rewrite the file in place with the widened rows.
    write_matrix(qtls_mk_file, output)
Append the flanking markers extracted in the process of generating the MapChart to the QTL list file .