idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
53,500
def subgraph_from(self, targets: sos_targets):
    """Return a new SoS_DAG trimmed to the nodes needed to produce `targets`."""
    if 'DAG' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
        env.log_to_file('DAG', 'create subgraph')
    # nodes whose (valid) output contains any of the requested targets
    producers = [
        node for node in self.nodes()
        if node._output_targets.valid() and any(x in node._output_targets for x in targets)
    ]
    # pull in everything upstream of those producers
    upstream = set()
    for node in producers:
        upstream |= nx.ancestors(self, node)
    return SoS_DAG(nx.subgraph(self, producers + list(upstream)))
Trim DAG to keep only nodes that produce targets
53,501
def build(self):
    """Connect DAG nodes according to shared variables and file targets."""
    if 'DAG' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
        env.log_to_file('DAG', 'build DAG')
    # first, link steps within each forward-style workflow
    for wf in range(self._forward_workflow_id + 1):
        steps = [x for x in self.nodes() if x._wf_index == wf]
        steps.sort(key=lambda x: x._node_index)
        for idx, node in enumerate(steps):
            # a step that changes variables feeds every later step that reads them
            if node._context['__changed_vars__']:
                for later in steps[idx + 1:]:
                    if node._context['__changed_vars__'] & (
                            later._context['__signature_vars__'] |
                            later._context['__environ_vars__']):
                        self.add_edge(node, later)
            # a step without valid input depends on an earlier step
            if not node._input_targets.valid() and idx > 0:
                if node._input_targets.undetermined():
                    if 'dynamic' in node._context['__environ_vars__']:
                        self.add_edge(steps[idx - 1], node)
                    else:
                        # look backwards for a step that changes a variable we read
                        for prev in steps[idx - 1::-1]:
                            if node._context['__environ_vars__'] & prev._context['__changed_vars__']:
                                self.add_edge(prev, node)
                else:
                    self.add_edge(steps[idx - 1], node)
    # then, link the producer of each file target to its consumers
    for target, consumers in self._all_depends_files.items():
        if target not in self._all_output_files:
            continue
        producers = self._all_output_files[target]
        for i in consumers:
            for j in producers:
                if j != i:
                    self.add_edge(j, i)
    self.mark_dirty()
Connect nodes according to status of targets
53,502
def monitor_tasks(self, tasks=None, status=None, age=None):
    """Return (task, status, info) tuples for the specified (or all) tasks,
    optionally filtered by status and age, most recent first."""
    self.engine_ready.wait()
    if not tasks:
        tasks = self.task_status.keys()
    else:
        tasks = [x for x in tasks if x in self.task_status]
    # NOTE(review): this locks a freshly created Lock, which protects nothing
    # shared -- an instance-level lock was presumably intended; confirm.
    with threading.Lock():
        for task in tasks:
            if self.task_status[task] in ('submitted', 'running') and task not in self.running_tasks:
                self.running_tasks.append(task)
    if age is not None:
        age = expand_time(age, default_unit='d')

    # NOTE(review): key 'data' here vs 'date' in the age filter below -- one of
    # the two looks like a typo; verify against the producers of task_info.
    def entry(task):
        return (task, self.task_status[task],
                self.task_info[task].get('data', (time.time(), None, None)))

    def selected(task):
        if status is not None and self.task_status[task] not in status:
            return False
        if age is None:
            return True
        elapsed = time.time() - self.task_info[task].get('date', (time.time(), None, None))[0]
        return (age > 0 and elapsed > age) or (age < 0 and elapsed < -age)

    return sorted((entry(x) for x in tasks if selected(x)), key=lambda x: -x[2][0])
Start monitoring specified or all tasks
53,503
def _submit_task_with_template(self, task_ids):
    """Submit tasks by interpolating `job_template` into a shell script,
    sending the script to the remote agent, and executing it there.

    Returns True on successful submission; raises ValueError if the template
    cannot be interpolated and RuntimeError if submission fails.
    """
    # NOTE(review): this updates self.config in place, leaking these keys into
    # the shared configuration; confirm that this is intended.
    runtime = self.config
    runtime.update({
        'workdir': os.getcwd(),
        'cur_dir': os.getcwd(),
        'verbosity': env.verbosity,
        'sig_mode': env.config.get('sig_mode', 'default'),
        'run_mode': env.config.get('run_mode', 'run'),
        'home_dir': os.path.expanduser('~')
    })
    if '_runtime' in env.sos_dict:
        runtime.update({
            x: env.sos_dict['_runtime'][x]
            for x in ('nodes', 'cores', 'workdir', 'mem', 'walltime')
            if x in env.sos_dict['_runtime']
        })
    # job templates expect nodes/cores to always be defined
    if 'nodes' not in runtime:
        runtime['nodes'] = 1
    if 'cores' not in runtime:
        runtime['cores'] = 1
    job_text = ''
    for task_id in task_ids:
        runtime['task'] = task_id
        try:
            job_text += cfg_interpolate(self.job_template, runtime)
            job_text += '\n'
        except Exception as e:
            raise ValueError(f'Failed to generate job file for task {task_id}: {e}')
    filename = task_ids[0] + ('.sh' if len(task_ids) == 1 else f'-{task_ids[-1]}.sh')
    job_file = os.path.join(os.path.expanduser('~'), '.sos', 'tasks', filename)
    with open(job_file, 'w', newline='') as job:
        job.write(job_text)
    self.agent.send_task_file(job_file)
    try:
        # BUG FIX: the command previously did not reference the generated job
        # file; run the script that was just sent to the remote host.
        cmd = f'bash ~/.sos/tasks/{filename}'
        self.agent.run_command(cmd, wait_for_task=self.wait_for_task)
    except Exception as e:
        raise RuntimeError(f'Failed to submit task {task_ids}: {e}')
    return True
Submit tasks by interpolating a shell script defined in job_template
53,504
def is_type_hint(stmt: str) -> bool:
    """Guess whether `stmt` is an assignment/annotation with a type hint
    rather than a SoS action or directive written in script format."""
    if stmt.count('=') > 1:
        return False
    if ':' not in stmt:
        return False
    if not stmt.split(':')[1].strip():
        return False
    # split "name: hint [= value]" into name and hint
    lhs = stmt.split('=', 1)[0] if '=' in stmt else stmt
    action, par = (x.strip() for x in lhs.split(':', 1))
    if action in SOS_DIRECTIVES:
        return False
    if par in SOS_ACTION_OPTIONS:
        return False
    if not par.isidentifier():
        return True
    if action in dir(builtins):
        return False
    global _action_list
    if _action_list is None:
        import pkg_resources
        _action_list = [x.name for x in pkg_resources.iter_entry_points(group='sos_actions')]
    if action in _action_list:
        return False
    if par in dir(typing) or par in dir(builtins):
        return True
    env.logger.debug(
        f"Failed to tell if '{stmt}' is an assignment with type hint or function in script format. Assuming type hint."
    )
    return True
Try to differentiate
53,505
def indented_script(self) -> int:
    """Return the indentation width of self._script, or 0 if not indented.

    BUG FIX: the annotation previously claimed `-> bool` although the method
    returns an indentation width (0 or len of the matched indent group).
    """
    leading = INDENTED.match(self._script)
    return 0 if leading is None else len(leading.group(2))
check self . _script and see if it is indented
53,506
def category(self) -> Optional[str]:
    """Classify accumulated content as 'script', 'directive', 'statements',
    or None when nothing has been collected yet."""
    if not self.statements:
        return None
    if self.statements[-1][0] != ':':
        return 'statements'

    def directive_complete() -> bool:
        # an incomplete argument list (trailing comma, or text that does not
        # compile as call arguments) means the directive is still being read
        if not self.values:
            return True
        if self.values[-1].strip().endswith(','):
            return False
        try:
            compile('func(' + ''.join(self.values) + ')', filename='<string>', mode='eval')
        except Exception:
            return False
        return True

    if directive_complete() and self._action is not None:
        return 'script'
    return 'directive'
Determine the category of existing statement
53,507
def isValid(self) -> bool:
    """Check whether the accumulated expression, directive, or script compiles.

    The parser keeps appending lines until a valid multi-line construct is
    formed; `error_msg` records why the last validation failed.
    """
    if not self.values:
        return True
    try:
        kind = self.category()
        if kind == 'directive':
            if self.values[-1].strip().endswith(','):
                self.error_msg = 'Trailing ,'
                return False
            body = ''.join(self.values)
            try:
                # directive arguments must form a valid call argument list ...
                compile('func(' + body + ')', filename='<string>', mode='eval')
            except:
                # ... or at least a valid function signature
                compile('def func(' + body + '):\n pass', filename='<string>', mode='exec')
        elif kind == 'statements':
            compile(''.join(self.values), filename='<string>', mode='exec')
        elif kind == 'script':
            return True
        else:
            raise RuntimeError(f'Unrecognized expression type {self.category()}')
        return True
    except Exception as e:
        self.error_msg = repr(e)
        return False
Determine if the statement expression or directive is valid . Otherwise the parser will continue until a valid multi - line expression or statement can be found .
53,508
def extend(self, line: str) -> None:
    """Append `line` to the current directive, script, or statement block."""
    kind = self.category()
    if kind == 'directive':
        self.add_directive(None, line)
    elif kind == 'script':
        self._script += line
    else:
        self.add_statement(line)
Extend the current directive expression or script
53,509
def add_statement(self, line: str, lineno: Optional[int] = None) -> None:
    """Record a line of regular Python statements."""
    if self.category() != 'statements':
        # starting a fresh statement block
        self.values = [line]
    else:
        self.values.append(line)
    if self.statements and self.statements[-1][0] == '!':
        # continue the previous statement entry
        self.statements[-1][-1] += line
    else:
        self.statements.append(['!', line])
    # NOTE(review): a lineno of 0 is ignored by this truthiness test; confirm
    # that line numbers are 1-based.
    if lineno:
        self.lineno = lineno
statements are regular python statements
53,510
def get_tokens(self) -> str:
    """Return all non-trivial tokens of the statements (and task, if any) as
    a single space-separated string."""

    def tokens_of(code: str):
        # drop empty tokens and newline tokens
        return [
            tok[1] for tok in generate_tokens(StringIO(code).readline)
            if tok[1] not in ('', '\n')
        ]

    collected: List = []
    for stmt in self.statements:
        # directives (':') keep their text in slot 2, statements in slot 1
        collected.extend(tokens_of(stmt[2] if stmt[0] == ':' else stmt[1]))
    if self.task:
        collected.extend(tokens_of(self.task))
    return ' '.join(collected)
Get tokens after input statement
53,511
def show(self):
    """Print this section and its local parameters for `sos show`."""
    width = max(60, shutil.get_terminal_size((80, 20)).columns)
    header = f' {self.step_name() + ":":<21} ' + self.comment
    print('\n'.join(
        textwrap.wrap(header, width=width, initial_indent='', subsequent_indent=' ' * 24)))
    # show only parameters defined in this section, not inherited global ones
    local_parameters = {
        name: info for name, info in self.parameters.items()
        if name not in self.global_parameters
    }
    if not local_parameters:
        return
    print(' Workflow Options:')
    for name, (value, comment) in local_parameters.items():
        print(f' {format_par(name, value)}')
        if comment:
            print('\n'.join(
                textwrap.wrap(comment, width=width, initial_indent=' ' * 24, subsequent_indent=' ' * 24)))
Output for command sos show
53,512
def extend(self, workflow: 'SoS_Workflow') -> None:
    """Append another workflow to this one, chaining the first appended
    section after our last section via a sos_step dependency."""
    if not workflow.sections:
        return
    if not self.sections:
        self.sections = workflow.sections
        return
    first = workflow.sections[0]
    anchor = f"sos_step('{self.sections[-1].step_name()}')"
    depends_idx = [
        idx for idx, stmt in enumerate(first.statements)
        if stmt[0] == ':' and stmt[1] == 'depends'
    ]
    if not depends_idx:
        # no depends directive yet: create one at the top of the section
        first.statements.insert(0, [':', 'depends', anchor])
    else:
        # append the anchor to the existing depends expression
        existing = first.statements[depends_idx[0]][2].strip()
        first.statements[depends_idx[0]][2] = existing + (", " if existing else "") + anchor + "\n"
    self.sections.extend(workflow.sections)
Append another workflow to an existing one to create a combined workflow
53,513
def add_comment(self, line: str) -> None:
    """Accumulate comment text to attach to the next section or parameter."""
    text = line.lstrip('#').strip()
    if self._last_comment:
        self._last_comment += ' ' + text
    else:
        self._last_comment = text
Keeping track of last comment for section and parameter
53,514
def workflow(self, workflow_name: Optional[str] = None, use_default: bool = True) -> SoS_Workflow:
    """Return a SoS_Workflow for `workflow_name`, which may be a combined
    name ('wf1+wf2') or contain a step subset ('wf:1-3').  This function can
    be called recursively for nested workflows.
    """
    if workflow_name is None and not use_default:
        # anonymous workflow containing every section
        return SoS_Workflow(self.content, '', '', self.sections, self.global_stmts)
    allowed_steps = None
    if not workflow_name:
        wf_name = ''
    else:
        if '+' in workflow_name:
            # combined workflow: build each part, then chain them together
            parts = []
            for wf in workflow_name.split('+'):
                if not SOS_SUBWORKFLOW.match(wf):
                    raise ValueError(f'Incorrect workflow name {workflow_name}')
                parts.append(self.workflow(wf))
            combined_wf = parts[0]
            for wf in parts[1:]:
                combined_wf.extend(wf)
            combined_wf.name = workflow_name
            return combined_wf
        mo = SOS_SUBWORKFLOW.match(workflow_name)
        if not mo:
            raise ValueError(f'Incorrect workflow name {workflow_name}')
        wf_name, allowed_steps = mo.group('name', 'steps')
    if not wf_name:
        # pick a default workflow when none was named
        if len(self.workflows) == 1:
            wf_name = list(self.workflows)[0]
        elif self.default_workflow:
            wf_name = self.default_workflow
        elif 'default' in self.workflows or '' in self.workflows:
            wf_name = 'default'
        else:
            raise ValueError('Name of workflow should be specified because '
                             'the script defines more than one pipelines without a default one. '
                             'Available pipelines are: {}.'.format(', '.join(self.workflows)))
    elif wf_name not in self.workflows and wf_name != 'default':
        raise ValueError(
            f'Workflow {wf_name} is undefined. Available workflows are: {", ".join(self.workflows)}')
    return SoS_Workflow(self.content, wf_name, allowed_steps, self.sections, self.global_stmts)
Return a workflow with name_step + name_step specified in wf_name This function might be called recursively because of nested workflow .
53,515
def print_help(self, script_name: str):
    """Print a help message listing workflows and options of the script."""
    width = max(60, shutil.get_terminal_size((80, 20)).columns)
    # break the usage line when the script name is long
    if len(script_name) > 20:
        print(f'usage: sos run {script_name}')
        print(' [workflow_name | -t targets] [options] [workflow_options]')
    else:
        print(f'usage: sos run {script_name} [workflow_name | -t targets] [options] [workflow_options]')
    print(' workflow_name: Single or combined workflows defined in this script')
    print(' targets: One or more targets to generate')
    print(' options: Single-hyphen sos parameters (see "sos run -h" for details)')
    print(' workflow_options: Double-hyphen workflow-specific parameters')
    description = textwrap.dedent(
        '\n'.join(x.lstrip('# ').strip() for x in self.description)).strip()
    if description:
        print('\n' + description)
    print('\nWorkflows:')
    print(' ' + '\n '.join(self.workflows))
    # options shared by all workflows
    global_parameters = {}
    for section in self.sections:
        global_parameters.update(section.global_parameters)
    if global_parameters:
        print('\nGlobal Workflow Options:')
        for name, (value, comment) in global_parameters.items():
            print(f' {format_par(name, value)}')
            if comment:
                print('\n'.join(
                    textwrap.wrap(comment, width=width, initial_indent=' ' * 24, subsequent_indent=' ' * 24)))
    # NOTE(review): likely missing a trailing ':'; kept as-is for output parity
    print('\nSections')
    for section in self.sections:
        section.show()
print a help message from the script
53,516
def glob_wildcards(pattern: str, files: Optional[List[str]] = None) -> Dict[str, Union[List[Any], List[str]]]:
    """Match `pattern` (containing {name} wildcards) against `files`, or
    against the filesystem when `files` is None, and return a dict mapping
    each wildcard name to the list of matched values."""
    pattern = os.path.normpath(pattern)
    if sys.platform == 'win32':
        # normpath produces backslashes on windows; the regex expects '/'
        pattern = pattern.replace('\\', '/')
    first_wildcard = re.search("{[^{]", pattern)
    dirname = os.path.dirname(pattern[:first_wildcard.start()]) if first_wildcard else os.path.dirname(pattern)
    if not dirname:
        dirname = "."
    names = [m.group('name') for m in SOS_WILDCARD.finditer(pattern)]
    res = {name: [] for name in names}
    pattern = re.compile(regex(pattern))
    if files is None:
        # walk the non-wildcard directory prefix of the pattern
        files = ((os.path.join(dirpath, f) if dirpath != "." else f)
                 for dirpath, dirnames, filenames in os.walk(dirname)
                 for f in chain(filenames, dirnames))
    for f in files:
        match = re.match(pattern, str(f).replace('\\', '/'))
        if match:
            for name, value in match.groupdict().items():
                res[name].append(value)
    return res
Glob the values of the wildcards by matching the given pattern to the filesystem . Returns a named tuple with a list of values for each wildcard .
53,517
def extract_pattern(pattern: str, ifiles: List[str]) -> Dict[str, Any]:
    """Match `pattern` against each input file and return the extracted
    filename pieces as {wildcard_name: [values]}; a non-matching file
    contributes None so positions stay aligned with `ifiles`.

    BUG FIX: the return annotation used the builtin `any` instead of
    `typing.Any`.
    """
    res = glob_wildcards(pattern, [])
    for ifile in ifiles:
        matched = glob_wildcards(pattern, [ifile])
        for key in matched.keys():
            if not matched[key]:
                # keep one entry per input file even when unmatched
                res[key].append(None)
            else:
                res[key].extend(matched[key])
    return res
This function matches pattern to a list of input files and extracts and returns pieces of filenames as a list of variables with keys defined by pattern .
53,518
def expand_pattern(pattern: str) -> List[str]:
    """Expand `pattern` against variables in the current SoS namespace and
    return the resulting list of filenames.

    Sequence-valued variables must all have the same length; scalar values
    are repeated for every expansion.

    BUG FIX: sequence detection used `collections.Sequence`, an alias that
    was removed in Python 3.10; use `collections.abc.Sequence` instead.
    (Also removed a duplicated `sz = None` initialization.)
    """
    ofiles = []
    res = glob_wildcards(pattern, [])
    sz = None
    # one dict of wildcard values per output filename
    wildcard = [{}]
    for key in res.keys():
        if key not in env.sos_dict:
            raise ValueError(f'Undefined variable {key} in pattern {pattern}')
        value = env.sos_dict[key]
        if not isinstance(value, str) and isinstance(value, collections.abc.Sequence):
            if sz is None:
                sz = len(value)
                wildcard = [copy.deepcopy(wildcard[0]) for x in range(sz)]
            elif sz != len(value):
                raise ValueError(
                    f'Variables in output pattern should have the same length (other={sz}, len({key})={len(env.sos_dict[key])})'
                )
            for idx, item in enumerate(value):
                wildcard[idx][key] = item
        else:
            # scalar: the same value applies to every expansion
            for card in wildcard:
                card[key] = value
    for card in wildcard:
        ofiles.append(
            apply_wildcards(pattern, card, fill_missing=False, fail_dynamic=False,
                            dynamic_fill=None, keep_dynamic=False))
    return ofiles
This function expand patterns against the current namespace and return a list of filenames
53,519
def interpolate(text, global_dict=None, local_dict=None):
    """Evaluate expressions in `text` as an f-string within the given
    namespaces and return the result.

    IMPROVEMENT: chain the original exception (`raise ... from e`) so the
    real cause is preserved in tracebacks.
    """
    try:
        return eval(as_fstring(text), global_dict, local_dict)
    except Exception as e:
        raise ValueError(f'Failed to interpolate {text}: {e}') from e
Evaluate expressions in text
53,520
def SoS_eval(expr: str, extra_dict: Optional[dict] = None) -> Any:
    """Evaluate an expression with the sos dict as globals and `extra_dict`
    as locals.

    BUG FIX: `extra_dict` previously defaulted to a shared mutable `{}`;
    since eval() writes local assignments into that dict, values could leak
    between unrelated calls.  Use a fresh dict per call instead.
    """
    return eval(expr, env.sos_dict.dict(), {} if extra_dict is None else extra_dict)
Evaluate an expression with sos dict .
53,521
def SoS_exec(script: str, _dict: dict = None, return_result: bool = True) -> None:
    """Execute `script` in `_dict` (defaults to the sos dict).  When
    `return_result` is True and the script ends with an expression, return
    the value of that expression.

    BUG FIX: `ast.Module` requires the `type_ignores` field on Python 3.8+;
    compiling `ast.Module(body=...)` without it raises a TypeError.
    """
    if _dict is None:
        _dict = env.sos_dict.dict()
    if not return_result:
        exec(compile(script, filename=stmtHash.hash(script), mode='exec'), _dict)
        return None
    try:
        stmts = list(ast.iter_child_nodes(ast.parse(script)))
        if not stmts:
            return
        if isinstance(stmts[-1], ast.Expr):
            # execute all but the last statement, then evaluate the trailing
            # expression so its value can be returned
            if len(stmts) > 1:
                exec(
                    compile(ast.Module(body=stmts[:-1], type_ignores=[]),
                            filename=stmtHash.hash(script), mode="exec"), _dict)
            res = eval(
                compile(ast.Expression(body=stmts[-1].value),
                        filename=stmtHash.hash(script), mode="eval"), _dict)
        else:
            exec(compile(script, filename=stmtHash.hash(script), mode='exec'), _dict)
            res = None
    except SyntaxError as e:
        raise SyntaxError(f"Invalid code {script}: {e}")
    return res
Execute a statement .
53,522
def expand_depends_files(*args, **kwargs):
    """Handle the `depends:` directive: resolve dynamic targets and build a
    sos_targets whose existence is verified."""
    resolved_args = [a.resolve() if isinstance(a, dynamic) else a for a in args]
    resolved_kwargs = {
        key: (val.resolve() if isinstance(val, dynamic) else val)
        for key, val in kwargs.items()
    }
    return sos_targets(
        *resolved_args,
        **resolved_kwargs,
        _verify_existence=True,
        _undetermined=False,
        _source=env.sos_dict['step_name'])
handle directive depends
53,523
def wait_for_subworkflows(self, workflow_results):
    """Yield the socket once per pending subworkflow and collect each
    result, propagating exceptions raised by subworkflows."""
    pending = sum([x['pending_workflows'] for x in workflow_results], [])
    for _ in pending:
        yield self.socket
        res = self.socket.recv_pyobj()
        if res is None:
            # the master asked this worker to terminate
            sys.exit(0)
        elif isinstance(res, Exception):
            raise res
Wait for results from subworkflows
53,524
def Rmarkdown(script=None, input=None, output=None, args='{input:r}, output_file={output:ar}', **kwargs):
    """Render `input` to `output` with R's rmarkdown package; when no output
    is specified, render to a temporary file and write it to stdout."""
    if not R_library('rmarkdown').target_exists():
        raise RuntimeError('Library rmarkdown does not exist')
    input = sos_targets(collect_input(script, input))
    output = sos_targets(output)
    if len(output) == 0:
        write_to_stdout = True
        output = sos_targets(
            tempfile.NamedTemporaryFile(mode='w+t', suffix='.html', delete=False).name)
    else:
        write_to_stdout = False
    ret = 1
    try:
        cmd = interpolate(f'Rscript -e "rmarkdown::render({args})"',
                          {'input': input, 'output': output})
        if 'ACTION' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
            env.log_to_file('ACTION', f'Running command "{cmd}"')
        if env.config['run_mode'] == 'interactive':
            # relay the command's stdout/stderr to the caller
            p = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
            out, err = p.communicate()
            sys.stdout.write(out.decode())
            sys.stderr.write(err.decode())
            ret = p.returncode
        else:
            p = subprocess.Popen(cmd, shell=True)
            ret = p.wait()
    except Exception as e:
        env.logger.error(e)
    if ret != 0:
        # keep a copy of the source so the user can reproduce the failure
        temp_file = os.path.join('.sos', f'{"Rmarkdown"}_{os.getpid()}.md')
        shutil.copyfile(str(input), temp_file)
        cmd = interpolate(f'Rscript -e "rmarkdown::render({args})"',
                          {'input': input, 'output': sos_targets(temp_file)})
        raise RuntimeError(
            f'Failed to execute script. Please use command \n"{cmd}"\nunder {os.getcwd()} to test it.')
    if write_to_stdout:
        with open(str(output[0])) as out:
            sys.stdout.write(out.read())
    else:
        env.logger.info(f'Report saved to {output}')
Convert input file to output using Rmarkdown
53,525
def total_memory(self, image='ubuntu'):
    """Return the total RAM of the docker machine in Kb (None on failure),
    caching the value in self.tot_mem.

    BUG FIX: the command string was lost -- `check_output(f, ...)` referenced
    an undefined name `f`.  Reconstructed as reading MemTotal from
    /proc/meminfo inside the image, which yields "MemTotal: <n> kB" so that
    `split()[1]` is the total memory in Kb.
    TODO(review): confirm the exact command against upstream.
    """
    try:
        ret = subprocess.check_output(
            f'docker run -t {image} cat /proc/meminfo | grep MemTotal',
            shell=True,
            stdin=subprocess.DEVNULL)
        # "MemTotal:  16302264 kB" -> second field is the number in Kb
        self.tot_mem = int(ret.split()[1])
    except Exception:
        # docker unavailable or command failed: report unknown
        self.tot_mem = None
    return self.tot_mem
Get the available ram of the docker machine in Kb
53,526
def script(script, interpreter='', suffix='', args='', **kwargs):
    """Execute `script` with the specified interpreter (the generic `script`
    action); common action arguments are passed through to run()."""
    executor = SoS_ExecuteScript(script, interpreter, suffix, args)
    return executor.run(**kwargs)
Execute specified script using specified interpreter . This action accepts common action arguments such as input active workdir docker_image and args . In particular content of one or more files specified by option input would be prepended before the specified script .
53,527
def stop_if(expr, msg='', no_output=False):
    """Abort the current step or substep with message `msg` when `expr` is
    truthy; discard the substep's output if `no_output` is set."""
    if not expr:
        return 0
    raise StopInputGroup(msg=msg, keep_output=not no_output)
Abort the execution of the current step or loop and yield a warning message msg if expr is True
53,528
def download(URLs, dest_dir='.', dest_file=None, decompress=False, max_jobs=5):
    """Download whitespace-separated URLs to `dest_dir` (or to a single
    `dest_file`), optionally decompressing, with up to `max_jobs` concurrent
    downloads.  Returns 0 on success and raises RuntimeError on failure.

    BUG FIXES: the single-URL failure message was missing its f-prefix, and
    the dest_file branch referenced an undefined name `url`.
    """
    if env.config['run_mode'] == 'dryrun':
        print(f'HINT: download\n{URLs}\n')
        return None
    if isinstance(URLs, str):
        urls = [x.strip() for x in URLs.split() if x.strip()]
    else:
        urls = list(URLs)
    if not urls:
        env.logger.debug(f'No download URL specified: {URLs}')
        return
    if dest_file is not None and len(urls) != 1:
        raise RuntimeError('Only one URL is allowed if a destination file is specified.')
    if dest_file is None:
        filenames = []
        for idx, url in enumerate(urls):
            token = urllib.parse.urlparse(url)
            # a usable URL must at least have a scheme and a host
            if not all([getattr(token, qualifying_attr) for qualifying_attr in ('scheme', 'netloc')]):
                raise ValueError(f'Invalid URL {url}')
            filename = os.path.split(token.path)[-1]
            if not filename:
                raise ValueError(f'Cannot determine destination file for {url}')
            filenames.append(os.path.join(dest_dir, filename))
    else:
        token = urllib.parse.urlparse(urls[0])
        if not all([getattr(token, qualifying_attr) for qualifying_attr in ('scheme', 'netloc')]):
            # BUG FIX: was f'Invalid URL {url}' with `url` undefined here
            raise ValueError(f'Invalid URL {urls[0]}')
        filenames = [dest_file]
    succ = [(False, None) for x in urls]
    with ProcessPoolExecutor(max_workers=max_jobs) as executor:
        for idx, (url, filename) in enumerate(zip(urls, filenames)):
            succ[idx] = executor.submit(downloadURL, url, filename, decompress, idx)
    succ = [x.result() for x in succ]
    failed = [y for x, y in zip(succ, urls) if not x]
    if failed:
        if len(urls) == 1:
            # BUG FIX: message was missing its f-prefix
            raise RuntimeError(f'Failed to download {urls[0]}')
        raise RuntimeError(f'Failed to download {failed[0]} ({len(failed)} out of {len(urls)})')
    return 0
Download files from specified URL which should be space tab or newline separated URLs . The files will be downloaded to specified destination . If filename . md5 files are downloaded they are used to validate downloaded filename . Unless otherwise specified compressed files are decompressed . If max_jobs is given a maximum of max_jobs concurrent download jobs will be used for each domain . This restriction applies to domain names and will be applied to multiple download instances .
53,529
def run(script, args='', **kwargs):
    """Execute `script` with bash (or directly on Windows, or with the
    interpreter named by a shebang line)."""
    if sys.platform == 'win32':
        interpreter = ''
    elif script.startswith('#!'):
        # the script names its own interpreter
        interpreter = ''
    else:
        interpreter = '/bin/bash'
        if not args:
            args = '-ev {filename:q}'
    return SoS_ExecuteScript(script, interpreter, '', args).run(**kwargs)
Execute specified script using bash . This action accepts common action arguments such as input active workdir docker_image and args . In particular content of one or more files specified by option input would be prepended before the specified script .
53,530
def pandoc(script=None, input=None, output=None, args='{input:q} --output {output:q}', **kwargs):
    """Convert `input` to `output` with pandoc; when no output is given,
    render to a temporary file and write it to stdout."""
    if not executable('pandoc').target_exists():
        raise RuntimeError('pandoc not found')
    input = sos_targets(collect_input(script, input))
    output = sos_targets(output)
    if len(output) == 0:
        write_to_stdout = True
        output = sos_targets(
            tempfile.NamedTemporaryFile(mode='w+t', suffix='.html', delete=False).name)
    else:
        write_to_stdout = False
    ret = 1
    try:
        p = None
        cmd = interpolate(f'pandoc {args}', {'input': input, 'output': output})
        if 'ACTION' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
            env.log_to_file('ACTION', f'Running command "{cmd}"')
        if env.config['run_mode'] == 'interactive':
            from .utils import pexpect_run
            ret = pexpect_run(cmd)
        else:
            p = subprocess.Popen(cmd, shell=True)
            ret = p.wait()
    except Exception as e:
        env.logger.error(e)
    if ret != 0:
        temp_file = os.path.join('.sos', f'pandoc_{os.getpid()}.md')
        # NOTE(review): unlike Rmarkdown, `input` is not wrapped in str() here;
        # confirm that shutil.copyfile accepts a sos_targets.
        shutil.copyfile(input, temp_file)
        cmd = interpolate(f'pandoc {args}',
                          {'input': sos_targets(temp_file), 'output': sos_targets(output)})
        raise RuntimeError(
            f'Failed to execute script. Please use command \n{cmd}\nunder {os.getcwd()} to test it.')
    if write_to_stdout:
        with open(output[0].fullname()) as out:
            sys.stdout.write(out.read())
    else:
        env.logger.info(f'Report saved to {output}')
    # remove the (presumably temporary) input file after rendering
    try:
        os.remove(input)
    except Exception:
        pass
Convert input file to output using pandoc
53,531
def get_changed_vars(section: SoS_Step):
    """Return the set of variables that the section shares (provides to
    other steps) via its `shared` option."""
    if 'shared' not in section.options:
        return set()
    svars = section.options['shared']
    if isinstance(svars, str):
        # a single name is shared as itself
        return {svars}
    if isinstance(svars, Sequence):
        changed_vars = set()
        for item in svars:
            if isinstance(item, str):
                changed_vars.add(item)
            elif isinstance(item, Mapping):
                changed_vars |= set(item.keys())
            else:
                raise ValueError(
                    f'Option shared should be a string, a mapping of expression, or list of string or mappings. {svars} provided'
                )
        return changed_vars
    if isinstance(svars, Mapping):
        return set(svars.keys())
    raise ValueError(
        f'Option shared should be a string, a mapping of expression, or list of string or mappings. {svars} provided'
    )
changed vars are variables that are shared and therefore provides to others
53,532
def get_all_used_vars(section):
    """Collect every variable the section uses: in its statements, in input
    options (paired_with / group_with / for_each), in its task, and in
    global functions that refer to those variables."""
    used = set()
    for statement in section.statements:
        if statement[0] == '=':
            used |= accessed_vars('='.join(statement[1:3]))
        elif statement[0] == '!':
            used |= accessed_vars(statement[1])
        elif statement[0] == ':':
            used |= accessed_vars(statement[2], mode='eval')
            if statement[1] != 'input':
                continue
            # these input options name variables as strings
            if 'paired_with' in statement[2]:
                try:
                    pws = get_names_of_param('paired_with', statement[2], extra_dict=env.sos_dict.dict())
                    used |= set(pws)
                except Exception as e:
                    raise ValueError(f'Failed to parse parameter paired_with: {e}')
            if 'group_with' in statement[2]:
                try:
                    pws = get_names_of_param('group_with', statement[2], extra_dict=env.sos_dict.dict())
                    used |= set(pws)
                except Exception as e:
                    raise ValueError(f'Failed to parse parameter group_with: {e}')
            if 'for_each' in statement[2]:
                try:
                    pws = get_names_of_param('for_each', statement[2], extra_dict=env.sos_dict.dict())
                    for pw in pws:
                        used |= set(pw.split(','))
                except Exception as e:
                    raise ValueError(f'Failed to parse parameter for_each: {e}')
    if section.task:
        used |= accessed_vars(section.task)
    # global functions drag in the variables they themselves use
    func_with_vars = [y for x, y in used_in_func(section.global_stmts).items() if x in used]
    return set.union(used, *func_with_vars)
Get variables which are variables used by input statement and statements before it
53,533
def get_signature_vars(section):
    """Return the variables saved with step signatures: parameters referred
    to by the global statements, variables accessed after the `input:`
    statement, and variables used by the task.  Dunder names are excluded."""
    sig_vars = set(
        section.parameters.keys() & accessed_vars(strip_param_defs(section.global_stmts)))
    input_idx = find_statement(section, 'input')
    start = 0 if input_idx is None else input_idx + 1
    for statement in section.statements[start:]:
        if statement[0] == '=':
            sig_vars |= accessed_vars('='.join(statement[1:3]))
        elif statement[0] == '!':
            sig_vars |= accessed_vars(statement[1])
    if section.task:
        sig_vars |= accessed_vars(section.task)
    return {x for x in sig_vars if not x.startswith('__')}
Get signature variables which are variables that will be saved with step signatures
53,534
def get_step_input(section, default_input):
    """Statically determine the input of a step without executing it.

    Returns (step_input, dynamic_input); when the input statement cannot be
    evaluated, step_input is marked undetermined.
    """
    step_input: sos_targets = sos_targets()
    dynamic_input = True
    input_idx = find_statement(section, 'input')
    if input_idx is None:
        return step_input, dynamic_input
    stmt = section.statements[input_idx][2]
    try:
        # temporarily neutralize functions that would require other steps
        svars = ['output_from', 'named_output', 'sos_step', 'sos_variable']
        old_values = {x: env.sos_dict.dict()[x] for x in svars if x in env.sos_dict.dict()}
        env.sos_dict.quick_update({
            'output_from': lambda *args, **kwargs: None,
            'named_output': lambda *args, **kwargs: None,
            'traced': lambda *args, **kwargs: sos_targets(*args, **kwargs),
            'sos_step': no_sos_step,
            'sos_variable': no_sos_variable,
        })
        args, kwargs = SoS_eval(f'__null_func__({stmt})', extra_dict=env.sos_dict.dict())
        if not args:
            step_input = sos_targets() if default_input is None else default_input
        elif not any(isinstance(x, (dynamic, remote)) for x in args):
            step_input = sos_targets(*args)
    except SyntaxError:
        raise
    except Exception as e:
        # the input cannot be determined until the step actually runs
        env.logger.debug(
            f'Input of step {section.name if section.index is None else f"{section.name}_{section.index}"} is set to Undertermined: {e}'
        )
        step_input = sos_targets(_undetermined=stmt)
    finally:
        [env.sos_dict.dict().pop(x) for x in svars]
        env.sos_dict.quick_update(old_values)
    return step_input, dynamic_input
Find step input
53,535
def get_step_output(section, default_output):
    """Statically determine the output of a step; for `provides` steps,
    verify that the declared output covers the expected default output."""
    step_output: sos_targets = sos_targets()
    if 'provides' in section.options and default_output:
        step_output = default_output
    output_idx = find_statement(section, 'output')
    if output_idx is None:
        return step_output
    value = section.statements[output_idx][2]
    try:
        # temporarily neutralize functions that would require other steps
        svars = ['output_from', 'named_output', 'sos_step', 'sos_variable']
        old_values = {x: env.sos_dict.dict()[x] for x in svars if x in env.sos_dict.dict()}
        env.sos_dict.quick_update({
            'output_from': no_output_from,
            'named_output': no_named_output,
            'sos_step': no_sos_step,
            'sos_variable': no_sos_variable,
        })
        args, kwargs = SoS_eval(f'__null_func__({value})', extra_dict=env.sos_dict.dict())
        if not any(isinstance(x, (dynamic, remote)) for x in args):
            step_output = sos_targets(
                *args, **{x: y for x, y in kwargs.items() if x not in SOS_TARGETS_OPTIONS})
    except SyntaxError:
        raise
    except Exception as e:
        if 'STEP' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
            env.log_to_file('STEP', f"Args {value} cannot be determined: {e}")
    finally:
        [env.sos_dict.dict().pop(x) for x in svars]
        env.sos_dict.quick_update(old_values)
    # a provides-step must generate at least the expected default output
    if 'provides' in section.options and default_output is not None and step_output.valid():
        for out in default_output:
            if not isinstance(out, sos_step) and out not in step_output:
                raise ValueError(
                    f'Defined output fail to produce expected output: {step_output} generated, {default_output} expected.'
                )
    return step_output
determine step output
53,536
def analyze_section(section: SoS_Step, default_input: Optional[sos_targets] = None,
        default_output: Optional[sos_targets] = None, context={},
        vars_and_output_only: bool = False) -> Dict[str, Any]:
    """Analyze a section for how it uses input/output and which variables
    it reads and writes.

    The analysis runs in a freshly requested environment that is always
    restored afterwards.  When ``vars_and_output_only`` is True the input
    and dependency analysis is skipped.

    NOTE(review): the mutable default ``context={}`` is shared across
    calls; it appears to only be passed through to prepare_env -- confirm
    it is never mutated there.
    """
    new_env, old_env = env.request_new()
    try:
        # evaluate global definitions in the isolated environment
        prepare_env(section.global_def, section.global_vars, context)
        env.sos_dict.set('step_name', section.step_name())
        env.sos_dict.set('__null_func__', __null_func__)
        if 'STEP' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
            env.log_to_file(
                'STEP',
                f'Analyzing {section.step_name()} {"(output only)" if vars_and_output_only else ""}')
        res = {
            'step_name': section.step_name(),
            'step_output': get_step_output(section, default_output),
            'environ_vars': get_environ_vars(section),
            'signature_vars': get_signature_vars(section),
            'changed_vars': get_changed_vars(section)
        }
        if not vars_and_output_only:
            inps = get_step_input(section, default_input)
            res['step_input'] = inps[0]
            res['dynamic_input'] = inps[1]
            deps = get_step_depends(section)
            res['step_depends'] = deps[0]
            res['dynamic_depends'] = deps[1]
    finally:
        # always restore the caller's environment
        env.restore_to_old(new_env, old_env)
    # drop global variables that the section does not actually use
    section.global_vars = {
        x: y for x, y in section.global_vars.items() if x in get_all_used_vars(section)
    }
    return res
Analyze a section for how it uses input and output what variables it uses and input output etc .
53,537
def extract_workflow(notebook):
    """Extract a SoS workflow script from a notebook file or notebook object.

    Only code cells belonging to the SoS kernel are considered.  Within a
    cell, ``%include``/``%from`` magics are always kept; everything from the
    first SoS section header onward (plus the comment lines immediately
    preceding the header) is included in the generated script.
    """
    if isinstance(notebook, str):
        # a path was given; read the notebook from disk
        nb = nbformat.read(notebook, nbformat.NO_CONVERT)
    else:
        nb = notebook
    cells = nb.cells
    content = '#!/usr/bin/env sos-runner\n#fileformat=SOS1.0\n\n'
    for cell in cells:
        if cell.cell_type != "code":
            continue
        # skip cells that belong to non-SoS kernels
        if 'kernel' in cell.metadata and cell.metadata['kernel'] not in ('sos', 'SoS', None):
            continue
        lines = cell.source.split('\n')
        valid_cell = False
        for idx, line in enumerate(lines):
            if valid_cell or (line.startswith('%include') or line.startswith('%from')):
                content += line + '\n'
            elif SOS_SECTION_HEADER.match(line):
                valid_cell = True
                # collect the comment block directly above the header
                c = idx - 1
                comment = ''
                while c >= 0 and lines[c].startswith('#'):
                    comment = lines[c] + '\n' + comment
                    c -= 1
                content += comment + line + '\n'
        if valid_cell:
            content += '\n'
    return content
Extract workflow from a notebook file or notebook JSON instance
53,538
def vim_ipython_is_open():
    """Return True if the vim-ipython shell buffer is currently visible
    in any window."""
    return any(
        w.buffer.name is not None and w.buffer.name.endswith("vim-ipython")
        for w in vim.windows
    )
Helper function to let us know if the vim - ipython shell is currently visible
53,539
def with_subchannel(f, *args):
    "conditionally monitor subchannel"
    def f_with_update(*args):
        # run f, then refresh the subchannel message buffer if monitoring
        try:
            f(*args)
            if monitor_subchannel:
                update_subchannel_msgs(force=True)
        except AttributeError:
            # raised when there is no connection to an IPython kernel;
            # NOTE(review): this also swallows AttributeErrors raised by
            # f itself -- confirm that is acceptable
            echo("not connected to IPython", 'Error')
    return f_with_update
conditionally monitor subchannel
53,540
def set_pid():
    """Explicitly ask the IPython kernel for its pid and store it in the
    module-level ``pid`` global.

    Handles multiple IPython versions: older kernels report via
    ``user_variables``, newer ones via ``user_expressions``.
    Returns the pid, or None when the kernel did not reply or the value
    could not be extracted.
    """
    global pid
    lines = '\n'.join(['import os', '_pid = os.getpid()'])
    try:
        # pre-0.13 IPython accepted user_variables
        msg_id = send(lines, silent=True, user_variables=['_pid'])
    except TypeError:
        # newer IPython only accepts user_expressions
        msg_id = send(lines, silent=True, user_expressions={'_pid': '_pid'})
    try:
        child = get_child_msg(msg_id)
    except Empty:
        echo("no reply from IPython kernel")
        return
    try:
        pid = int(child['content']['user_variables']['_pid'])
    except TypeError:
        # user_variables entries became rich display dicts
        pid = int(child['content']['user_variables']['_pid']['data']['text/plain'])
    except KeyError:
        # no user_variables at all: look in user_expressions instead.
        # bug fix: this lookup used to be a sibling `except KeyError`
        # clause on the same try, which is unreachable -- handlers of the
        # same try never catch exceptions raised by another handler, so a
        # KeyError here used to propagate uncaught
        try:
            pid = int(child['content']['user_expressions']['_pid']['data']['text/plain'])
        except KeyError:
            echo("Could not get PID information, kernel not running Python?")
            return
    return pid
Explicitly ask the ipython kernel for its pid
53,541
def fetchmany(self, size=-1):
    """Return a sequential batch of records.

    Sequentiality is guaranteed by holding the cursor lock for the whole
    fetch so no other thread can interleave its own fetches.  Side effect:
    other threads may block for an arbitrarily long time while the current
    batch completes.

    size -- number of records to fetch; values below 0 or above
            MAX_BLOCK_SIZE fall back to self.arraysize.
    """
    self._cursorLock.acquire()
    # bug fix: the lock was previously released without try/finally, so an
    # exception raised by fetchone() would leave the lock held forever,
    # deadlocking every other thread
    try:
        if size < 0 or size > self.MAX_BLOCK_SIZE:
            size = self.arraysize
        return [self.fetchone() for _ in range(size)]
    finally:
        self._cursorLock.release()
Return a sequential set of records. Sequentiality is guaranteed by locking, so no other thread can grab a few records while a set is being fetched. A side effect is that other threads may have to wait an arbitrarily long time for the current request to complete.
53,542
def on_number(self, ctx, value):
    """yajl number callback.

    Because this callback is defined, yajl's separate integer and double
    callbacks are never invoked, so the raw numeric text must be parsed
    here and fired as a VALUE or ELEMENT event depending on the enclosing
    composite.
    """
    # bug fix: str.isdigit() is False for negative integers (and strings
    # like "+1"), so values such as "-5" were previously parsed as floats
    try:
        value = int(value)
    except ValueError:
        value = float(value)
    top = self._stack[-1]
    if top is JSONCompositeType.OBJECT:
        self.fire(JSONStreamer.VALUE_EVENT, value)
    elif top is JSONCompositeType.ARRAY:
        self.fire(JSONStreamer.ELEMENT_EVENT, value)
    else:
        raise RuntimeError('Invalid json-streamer state')
Handles any numeric value; because this callback is defined, the separate integer and double callbacks are never invoked.
53,543
def close(self):
    """Close the streamer.

    Fires a DOC_END_EVENT and frees the memory used by the underlying
    yajl parser; the streamer cannot be reused afterwards.
    """
    self.fire(JSONStreamer.DOC_END_EVENT)
    self._stack = None
    self._parser.close()
Closes the streamer which causes a DOC_END_EVENT to be fired and frees up memory used by yajl
53,544
async def minizinc(
        mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
        globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
        keep=False, output_vars=None, output_base=None, output_mode='dict',
        solver=None, timeout=None, two_pass=None, pre_passes=None,
        output_objective=False, non_unique=False, all_solutions=False,
        num_solutions=None, free_search=False, parallel=None, seed=None,
        rebase_arrays=True, keep_solutions=True, return_enums=False,
        max_queue_size=0, **kwargs):
    """Coroutine version of the pymzn.minizinc function.

    Solutions are delivered through an asyncio queue (bounded by
    ``max_queue_size``): raw solver output with ``output_mode='raw'``,
    parsed solutions otherwise.
    """
    mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
        _minizinc_preliminaries(
            mzn, *dzn_files, args=args, data=data, include=include,
            stdlib_dir=stdlib_dir, globals_dir=globals_dir,
            output_vars=output_vars, keep=keep, output_base=output_base,
            output_mode=output_mode, declare_enums=declare_enums,
            allow_multiple_assignments=allow_multiple_assignments)
    if not solver:
        # fall back to the configured solver, defaulting to gecode
        solver = config.get('solver', gecode)
    solver_args = {**kwargs, **config.get('solver_args', {})}
    proc = await solve(
        solver, mzn_file, *dzn_files, data=data, include=include,
        stdlib_dir=stdlib_dir, globals_dir=globals_dir,
        output_mode=_output_mode, timeout=timeout, two_pass=two_pass,
        pre_passes=pre_passes, output_objective=output_objective,
        non_unique=non_unique, all_solutions=all_solutions,
        num_solutions=num_solutions, free_search=free_search,
        parallel=parallel, seed=seed,
        allow_multiple_assignments=allow_multiple_assignments, **solver_args)
    if output_mode == 'raw':
        # raw mode: forward solver output into the queue unparsed
        solns = asyncio.Queue(maxsize=max_queue_size)
        task = asyncio.create_task(_collect(proc, solns))
    else:
        parser = AsyncSolutionParser(
            solver, output_mode=output_mode, rebase_arrays=rebase_arrays,
            types=types, keep_solutions=keep_solutions,
            return_enums=return_enums, max_queue_size=max_queue_size)
        solns = await parser.parse(proc)
        task = parser.parse_task
    if not keep:
        # remove generated files once parsing/collection completes
        task.add_done_callback(partial(_cleanup_cb, [mzn_file, data_file]))
    return solns
Coroutine version of the pymzn . minizinc function .
53,545
def parse_value(val, var_type=None, enums=None, rebase_arrays=True):
    """Parse the value of a dzn statement into a Python object.

    When type information is available it drives the parse; otherwise an
    array parse is attempted first with scalar parsing as the fallback.
    """
    if var_type:
        if 'dims' in var_type:
            return _parse_array(
                val, rebase_arrays=rebase_arrays, var_type=var_type, enums=enums)
        return _parse_val(val, var_type=var_type, enums=enums)
    parsed = _parse_array(
        val, rebase_arrays=rebase_arrays, enums=enums, raise_errors=False)
    if parsed is None:
        return _parse_val(val, enums=enums)
    return parsed
Parses the value of a dzn statement .
53,546
def dzn2dict(dzn, *, rebase_arrays=True, types=None, return_enums=False):
    """Parse a dzn string or file into a dict of variable assignments.

    ``types`` optionally maps variable names to type information (a string
    or a dict) used to guide parsing.  Enum declarations are resolved first
    so later values can refer to them; the enum classes themselves are
    included in the result when ``return_enums`` is True.
    """
    dzn_ext = os.path.splitext(dzn)[1]
    if dzn_ext == '.dzn':
        # a file path was passed rather than dzn content; read it
        with open(dzn) as f:
            dzn = f.read()
    var_types = None
    if types:
        # normalize the type spec: strings are parsed, dicts used verbatim
        var_types = {}
        for var, var_type in types.items():
            if isinstance(var_type, str):
                var_types[var] = _to_var_type(var, var_type)
            elif isinstance(var_type, dict):
                var_types[var] = var_type
            else:
                err = 'Type of variable {} must be a string or a dict.'
                raise ValueError(err.format(var))
    enum_types = None
    if var_types:
        # a variable whose type is the enum named after itself is an enum
        # *declaration*
        enum_types = []
        for var, var_type in var_types.items():
            if 'enum_type' in var_type and var_type['enum_type'] == var:
                enum_types.append(var)
    var_list = []
    dzn = _comm_p.sub('\n', dzn)  # strip comments
    stmts = _stmt_p.findall(dzn)
    for stmt in stmts:
        var_m = _var_p.match(stmt)
        if var_m:
            var = var_m.group('var')
            val = var_m.group('val')
            var_list.append((var, val))
        else:
            raise ValueError(
                'Unsupported parsing for statement:\n{}'.format(repr(stmt)))
    enums = None
    if enum_types:
        # build IntEnum classes for declared enums before parsing values
        enums = {}
        remaining = []
        while len(var_list) > 0:
            var, val = var_list.pop(0)
            if var in enum_types:
                enum = None
                enum_m = _enum_p.match(val)
                if enum_m:
                    vals = enum_m.group('vals').strip()
                    if vals:
                        enum_vals = _parse_enum_vals(vals.split(','))
                        # dzn enums are 1-based
                        enum = IntEnum(
                            var, {v: i + 1 for i, v in enumerate(enum_vals)})
                if enum is None:
                    raise ValueError(
                        'Cannot parse enum type \'{} = {}\'.'.format(var, val))
                enums[var] = enum
            else:
                remaining.append((var, val))
        var_list = remaining
    assign = {}
    for var, val in var_list:
        var_type = None
        if var_types:
            var_type = var_types.get(var, None)
        assign[var] = parse_value(
            val, var_type=var_type, enums=enums, rebase_arrays=rebase_arrays)
    if return_enums and enums:
        assign.update(enums)
    return assign
Parses a dzn string or file into a dictionary of variable assignments .
53,547
def args(self, all_solutions=False, num_solutions=None, free_search=False,
         parallel=None, seed=None, **kwargs):
    """Build the list of command line arguments for the given solver
    options.  Statistics (-s) and verbose (-v) output are always on."""
    cli = ['-s', '-v']
    if all_solutions:
        cli.append('-a')
    if num_solutions is not None:
        cli.extend(['-n', num_solutions])
    if free_search:
        cli.append('-f')
    if parallel is not None:
        cli.extend(['-p', parallel])
    if seed is not None:
        cli.extend(['-r', seed])
    return cli
Returns a list of command line arguments for the specified options .
53,548
def debug(dbg=True):
    """Enable or disable debugging messages on the standard output.

    Calling it twice with the same flag is a no-op.
    """
    global _debug_handler
    if dbg:
        if _debug_handler is None:
            _debug_handler = logging.StreamHandler()
            logger.addHandler(_debug_handler)
            logger.setLevel(logging.DEBUG)
    elif _debug_handler is not None:
        logger.removeHandler(_debug_handler)
        _debug_handler = None
        logger.setLevel(logging.WARNING)
Enables or disables debugging messages on the standard output .
53,549
def minizinc_version():
    """Return the version string of the minizinc executable on the PATH.

    Raises RuntimeError when the version cannot be parsed from the output
    of ``minizinc --version``.
    """
    vs = _run_minizinc('--version')
    # bug fix: use a raw string for the pattern; '\d' in a plain string
    # literal is an invalid escape sequence (SyntaxWarning on recent
    # Python versions)
    m = re.findall(r'version ([\d\.]+)', vs)
    if not m:
        raise RuntimeError('MiniZinc executable not found.')
    return m[0]
Returns the version of the found minizinc executable .
53,550
def preprocess_model(model, rewrap=True, **kwargs):
    """Preprocess a MiniZinc model by rendering its template arguments.

    Configured arguments override the keyword arguments passed in.
    """
    template_args = dict(kwargs)
    template_args.update(config.get('args', {}))
    processed = _process_template(model, **template_args)
    return rewrap_model(processed) if rewrap else processed
Preprocess a MiniZinc model .
53,551
def save_model(model, output_file=None, output_dir=None, output_prefix='pymzn'):
    """Write a model string to a file and return the file's path.

    When no output_file is given, a uniquely-named .mzn file is created in
    output_dir (not deleted on close, so the caller owns the file).
    """
    if output_file:
        mzn_file = output_file
        f = open(output_file, 'w+', buffering=1)
    else:
        f = NamedTemporaryFile(
            dir=output_dir, prefix=output_prefix + '_', suffix='.mzn',
            delete=False, mode='w+', buffering=1)
        mzn_file = f.name
    f.write(model)
    f.close()
    logger.info('Generated file {}'.format(mzn_file))
    return mzn_file
Save a model to file .
53,552
def check_instance(mzn, *dzn_files, data=None, include=None, stdlib_dir=None,
                   globals_dir=None, allow_multiple_assignments=False):
    """Perform instance checking on a model plus its data.

    Raises MiniZincError when the check fails.
    """
    args = ['--instance-check-only']
    args += _flattening_args(
        mzn, *dzn_files, data=data, include=include, stdlib_dir=stdlib_dir,
        globals_dir=globals_dir,
        allow_multiple_assignments=allow_multiple_assignments)
    # when the model is passed on stdin the last argument is '-'
    input = None if args[-1] != '-' else mzn
    proc = _run_minizinc_proc(*args, input=input)
    if proc.stderr_data:
        raise MiniZincError(
            mzn if input is None else '\n' + mzn + '\n', args, proc.stderr_data)
Perform instance checking on a model + data .
53,553
def check_model(mzn, *, include=None, stdlib_dir=None, globals_dir=None):
    """Perform model checking on the given model.

    Raises MiniZincError when the check fails.
    """
    args = ['--model-check-only']
    args += _flattening_args(
        mzn, include=include, stdlib_dir=stdlib_dir, globals_dir=globals_dir)
    # when the model is passed on stdin the last argument is '-'
    input = None if args[-1] != '-' else mzn
    proc = _run_minizinc_proc(*args, input=input)
    if proc.stderr_data:
        raise MiniZincError(
            mzn if input is None else '\n' + mzn + '\n', args, proc.stderr_data)
Perform model checking on a given model .
53,554
def minizinc(mzn, *dzn_files, args=None, data=None, include=None,
        stdlib_dir=None, globals_dir=None, declare_enums=True,
        allow_multiple_assignments=False, keep=False, output_vars=None,
        output_base=None, output_mode='dict', solver=None, timeout=None,
        two_pass=None, pre_passes=None, output_objective=False,
        non_unique=False, all_solutions=False, num_solutions=None,
        free_search=False, parallel=None, seed=None, rebase_arrays=True,
        keep_solutions=True, return_enums=False, **kwargs):
    """Implements the workflow for solving a CSP problem encoded with
    MiniZinc.

    Flattens the model, runs the configured solver and returns either the
    raw solver output (``output_mode='raw'``) or a parsed solution stream.
    """
    mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
        _minizinc_preliminaries(
            mzn, *dzn_files, args=args, data=data, include=include,
            stdlib_dir=stdlib_dir, globals_dir=globals_dir,
            output_vars=output_vars, keep=keep, output_base=output_base,
            output_mode=output_mode, declare_enums=declare_enums,
            allow_multiple_assignments=allow_multiple_assignments)
    if not solver:
        # fall back to the configured solver, defaulting to gecode
        solver = config.get('solver', gecode)
    solver_args = {**kwargs, **config.get('solver_args', {})}
    proc = solve(
        solver, mzn_file, *dzn_files, data=data, include=include,
        stdlib_dir=stdlib_dir, globals_dir=globals_dir,
        output_mode=_output_mode, timeout=timeout, two_pass=two_pass,
        pre_passes=pre_passes, output_objective=output_objective,
        non_unique=non_unique, all_solutions=all_solutions,
        num_solutions=num_solutions, free_search=free_search,
        parallel=parallel, seed=seed,
        allow_multiple_assignments=allow_multiple_assignments, **solver_args)
    if not keep:
        # remove the generated model/data files
        _cleanup([mzn_file, data_file])
    if output_mode == 'raw':
        return proc.stdout_data
    parser = SolutionParser(
        solver, output_mode=output_mode, rebase_arrays=rebase_arrays,
        types=types, keep_solutions=keep_solutions, return_enums=return_enums)
    solns = parser.parse(proc)
    return solns
Implements the workflow for solving a CSP problem encoded with MiniZinc .
53,555
def solve(solver, mzn, *dzn_files, data=None, include=None, stdlib_dir=None,
        globals_dir=None, allow_multiple_assignments=False,
        output_mode='item', timeout=None, two_pass=None, pre_passes=None,
        output_objective=False, non_unique=False, all_solutions=False,
        num_solutions=None, free_search=False, parallel=None, seed=None,
        **kwargs):
    """Flatten and solve a MiniZinc program, returning the solver process.

    Raises MiniZincError (chained to the original RuntimeError) when the
    minizinc process cannot be run.
    """
    args = _solve_args(
        solver, timeout=timeout, two_pass=two_pass, pre_passes=pre_passes,
        output_objective=output_objective, non_unique=non_unique,
        all_solutions=all_solutions, num_solutions=num_solutions,
        free_search=free_search, parallel=parallel, seed=seed, **kwargs)
    args += _flattening_args(
        mzn, *dzn_files, data=data, stdlib_dir=stdlib_dir,
        globals_dir=globals_dir, output_mode=output_mode, include=include,
        allow_multiple_assignments=allow_multiple_assignments)
    # when the model is passed on stdin the last argument is '-'
    input = mzn if args[-1] == '-' else None
    t0 = _time()
    try:
        proc = _run_minizinc_proc(*args, input=input)
    except RuntimeError as err:
        # bug fix: this previously raised MiniZincError(mzn_file, args),
        # but `mzn_file` is not defined in this function, so every solver
        # startup failure surfaced as a NameError instead
        raise MiniZincError(mzn, args) from err
    solve_time = _time() - t0
    logger.info('Solving completed in {:>3.2f} sec'.format(solve_time))
    return proc
Flatten and solve a MiniZinc program .
53,556
def mzn2fzn(mzn, *dzn_files, args=None, data=None, include=None,
        stdlib_dir=None, globals_dir=None, declare_enums=True,
        allow_multiple_assignments=False, keep=False, output_vars=None,
        output_base=None, output_mode='item', no_ozn=False):
    """Flatten a MiniZinc model into a FlatZinc one.

    Returns a ``(fzn_file, ozn_file)`` tuple of generated file paths;
    either entry is None when the corresponding file was not produced.
    """
    mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
        _minizinc_preliminaries(
            mzn, *dzn_files, args=args, data=data, include=include,
            stdlib_dir=stdlib_dir, globals_dir=globals_dir,
            output_vars=output_vars, keep=keep, output_base=output_base,
            output_mode=output_mode, declare_enums=declare_enums,
            allow_multiple_assignments=allow_multiple_assignments)
    args = ['--compile']
    args += _flattening_args(
        mzn_file, *dzn_files, data=data, stdlib_dir=stdlib_dir,
        globals_dir=globals_dir, output_mode=output_mode, include=include,
        no_ozn=no_ozn, output_base=output_base,
        allow_multiple_assignments=allow_multiple_assignments)
    t0 = _time()
    _run_minizinc(*args)
    flattening_time = _time() - t0
    logger.info('Flattening completed in {:>3.2f} sec'.format(flattening_time))
    if not keep:
        # delete the temporary data file; it may never have been created
        with contextlib.suppress(FileNotFoundError):
            if data_file:
                os.remove(data_file)
                logger.info('Deleted file: {}'.format(data_file))
    if output_base:
        mzn_base = output_base
    else:
        mzn_base = os.path.splitext(mzn_file)[0]
    # report only files minizinc actually produced
    fzn_file = '.'.join([mzn_base, 'fzn'])
    fzn_file = fzn_file if os.path.isfile(fzn_file) else None
    ozn_file = '.'.join([mzn_base, 'ozn'])
    ozn_file = ozn_file if os.path.isfile(ozn_file) else None
    if fzn_file:
        logger.info('Generated file: {}'.format(fzn_file))
    if ozn_file:
        logger.info('Generated file: {}'.format(ozn_file))
    return fzn_file, ozn_file
Flatten a MiniZinc model into a FlatZinc one .
53,557
def print(self, output_file=sys.stdout, log=False):
    """Print the solution stream in MiniZinc solver-output format.

    Each solution is followed by the solution separator; a final status
    line is printed according to the stream status.  Any stderr content
    goes to sys.stderr; otherwise the log is printed when ``log`` is True.
    """
    for soln in iter(self):
        # `print` below resolves to the builtin, not this method
        print(soln, file=output_file)
        print(SOLN_SEP, file=output_file)
    if self.status == 0:
        print(SEARCH_COMPLETE, file=output_file)
    if (self.status == 1 and self._n_solns == 0) or self.status >= 2:
        # map the terminal status to its output marker
        print({
            Status.INCOMPLETE: ERROR,
            Status.UNKNOWN: UNKNOWN,
            Status.UNSATISFIABLE: UNSATISFIABLE,
            Status.UNBOUNDED: UNBOUNDED,
            Status.UNSATorUNBOUNDED: UNSATorUNBOUNDED,
            Status.ERROR: ERROR
        }[self.status], file=output_file)
    if self.stderr:
        print(self.stderr.strip(), file=sys.stderr)
    elif log:
        print(str(self.log), file=output_file)
Print the solution stream
53,558
def dump(self):
    """Write the current configuration settings to the config file.

    Raises RuntimeError when the optional dependencies (pyyaml, appdirs)
    are not installed.
    """
    try:
        import yaml
    except ImportError as err:
        raise RuntimeError(
            'Cannot dump the configuration settings to file. You need to '
            'install the necessary dependencies (pyyaml, appdirs).') from err
    cfg_file = self._cfg_file()
    cfg_dir, __ = os.path.split(cfg_file)
    os.makedirs(cfg_dir, exist_ok=True)
    with open(cfg_file, 'w') as f:
        yaml.dump(self, f)
Writes the changes to the configuration file .
53,559
def discretize(value, factor=100):
    """Discretize a value, or each element of an iterable, by
    pre-multiplying by the given factor and truncating to int."""
    if not isinstance(value, Iterable):
        return int(value * factor)
    return [int(element * factor) for element in value]
Discretize the given value pre - multiplying by the given factor
53,560
def from_string(source, args=None):
    """Render a template string with the given arguments.

    Raises RuntimeError if jinja2 is unavailable but arguments were given.
    """
    if not _has_jinja:
        if args:
            raise RuntimeError(_except_text)
        return source
    logger.info('Precompiling model with arguments: {}'.format(args))
    return _jenv.from_string(source).render(args or {})
Renders a template string
53,561
def add_package(package_name, package_path='templates', encoding='utf-8'):
    """Add the given package to the template search routine.

    Raises RuntimeError when jinja2 is not available.
    """
    if not _has_jinja:
        raise RuntimeError(_except_text)
    loader = PackageLoader(package_name, package_path, encoding)
    _jload.add_loader(loader)
Adds the given package to the template search routine
53,562
def add_path(searchpath, encoding='utf-8', followlinks=False):
    """Add the given filesystem path to the template search routine.

    Raises RuntimeError when jinja2 is not available.
    """
    if not _has_jinja:
        raise RuntimeError(_except_text)
    loader = FileSystemLoader(searchpath, encoding, followlinks)
    _jload.add_loader(loader)
Adds the given path to the template search routine
53,563
def val2dzn(val, wrap=True):
    """Serialize a Python value into its dzn representation.

    Raises TypeError for unsupported value types.
    """
    if _is_value(val):
        serialized = _dzn_val(val)
    elif _is_set(val):
        serialized = _dzn_set(val)
    elif _is_array_type(val):
        serialized = _dzn_array_nd(val)
    else:
        raise TypeError('Unsupported serialization of value: {}'.format(repr(val)))
    return _get_wrapper().fill(serialized) if wrap else serialized
Serializes a value into its dzn representation .
53,564
def stmt2dzn(name, val, declare=True, assign=True, wrap=True):
    """Return a dzn statement declaring and/or assigning the given value."""
    if not declare and not assign:
        raise ValueError('The statement must be a declaration or an assignment.')
    parts = []
    if declare:
        parts.append('{}: '.format(_dzn_type(val)))
    parts.append(name)
    if assign:
        parts.append(' = {}'.format(val2dzn(val, wrap=wrap)))
    parts.append(';')
    return ''.join(parts)
Returns a dzn statement declaring and assigning the given value .
53,565
def stmt2enum(enum_type, declare=True, assign=True, wrap=True):
    """Return a dzn enum declaration/assignment for an enum type."""
    if not declare and not assign:
        raise ValueError('The statement must be a declaration or an assignment.')
    parts = []
    if declare:
        parts.append('enum ')
    parts.append(enum_type.__name__)
    if assign:
        members = ','.join(member.name for member in enum_type)
        body = '{' + members + '}'
        if wrap:
            body = _get_wrapper().fill(body)
        parts.append(' = {}'.format(body))
    parts.append(';')
    return ''.join(parts)
Returns a dzn enum declaration from an enum type .
53,566
def dict2dzn(objs, declare=False, assign=True, declare_enums=True, wrap=True, fout=None):
    """Serialize a dict of objects into a list of dzn statement strings.

    Enum types encountered in the values are emitted once each (when
    ``declare_enums`` is True).  Optionally writes the statements to
    ``fout``.  Returns the list of statement strings.
    """
    log = logging.getLogger(__name__)
    vals = []
    enums = set()  # names of enum types already emitted
    for key, val in objs.items():
        if _is_enum(val) and declare_enums:
            enum_type = type(val)
            enum_name = enum_type.__name__
            if enum_name not in enums:
                enum_stmt = stmt2enum(
                    enum_type, declare=declare, assign=assign, wrap=wrap)
                vals.append(enum_stmt)
                enums.add(enum_name)
        stmt = stmt2dzn(key, val, declare=declare, assign=assign, wrap=wrap)
        vals.append(stmt)
    if fout:
        log.debug('Writing file: {}'.format(fout))
        with open(fout, 'w') as f:
            for val in vals:
                f.write('{}\n\n'.format(val))
    return vals
Serializes the objects in input and produces a list of strings encoding them into dzn format . Optionally the produced dzn is written on a file .
53,567
def async_or_eager(self, **options):
    """Attempt apply_async; when the broker is unreachable, run the task
    eagerly via apply and return the EagerResult instead."""
    args = options.pop("args", None)
    kwargs = options.pop("kwargs", None)
    broker_errors = self._get_possible_broker_errors_tuple()
    try:
        return self.apply_async(args, kwargs, **options)
    except broker_errors:
        return self.apply(args, kwargs, **options)
Attempt to call self . apply_async or if that fails because of a problem with the broker run the task eagerly and return an EagerResult .
53,568
def async_or_fail(self, **options):
    """Attempt apply_async; when the broker is unreachable, fake the task
    completion with the exception as the result so callers can handle
    broker errors the same way as ordinary task errors."""
    args = options.pop("args", None)
    kwargs = options.pop("kwargs", None)
    broker_errors = self._get_possible_broker_errors_tuple()
    try:
        return self.apply_async(args, kwargs, **options)
    except broker_errors as e:
        return self.simulate_async_error(e)
Attempt to call self . apply_async but if that fails with an exception we fake the task completion using the exception as the result . This allows us to seamlessly handle errors on task creation the same way we handle errors when a task runs simplifying the user interface .
53,569
def delay_or_eager(self, *args, **kwargs):
    """Wrap async_or_eager with a delay()-like convenience signature."""
    return self.async_or_eager(args=args, kwargs=kwargs)
Wrap async_or_eager with a convenience signature like delay
53,570
def delay_or_run(self, *args, **kwargs):
    """Deprecated: attempt self.delay; if the broker fails, run the task
    synchronously.

    Returns a (result, required_fallback) tuple where required_fallback
    says whether the synchronous fallback was used.
    """
    warnings.warn(
        "delay_or_run is deprecated. Please use delay_or_eager",
        DeprecationWarning,
    )
    broker_errors = self._get_possible_broker_errors_tuple()
    try:
        return self.apply_async(args=args, kwargs=kwargs), False
    except broker_errors:
        return self().run(*args, **kwargs), True
Attempt to call self . delay or if that fails call self . run .
53,571
def delay_or_fail(self, *args, **kwargs):
    """Wrap async_or_fail with a delay()-like convenience signature."""
    return self.async_or_fail(args=args, kwargs=kwargs)
Wrap async_or_fail with a convenience signature like delay
53,572
def simulate_async_error(self, exception):
    """Store the given exception in the result backend as a task failure.

    This unifies broker-connection errors with ordinary task errors so the
    caller's existing error handling (retry, user messaging) also applies.
    Must be called from inside an ``except`` block, because ExceptionInfo
    reads sys.exc_info().
    """
    task_id = gen_unique_id()
    async_result = self.AsyncResult(task_id)
    einfo = ExceptionInfo(sys.exc_info())
    async_result.backend.mark_as_failure(
        task_id, exception, traceback=einfo.traceback,
    )
    return async_result
Take this exception and store it as an error in the result backend . This unifies the handling of broker - connection errors with any other type of error that might occur when running the task . So the same error - handling that might retry a task or display a useful message to the user can also handle this error .
53,573
def calc_progress(self, completed_count, total_count):
    """Estimate progress from elapsed time since self.start_time.

    Returns (percent_complete, seconds_remaining); a finished or empty
    task reports (100, 1).
    """
    self.logger.debug("calc_progress(%s, %s)", completed_count, total_count)
    elapsed = time.time() - self.start_time
    self.logger.debug("Progress time spent: %s", elapsed)
    if total_count == 0:
        return 100, 1
    fraction = completed_count / total_count
    if fraction == 0:
        # avoid dividing by zero below
        fraction = 1
    remaining = elapsed / fraction - elapsed
    percent = fraction * 100
    if percent == 100:
        return 100, 1
    return percent, remaining
Calculate the percentage progress and estimated remaining time based on the current number of items completed of the total .
53,574
def update_progress(self, completed_count, total_count, update_frequency=1,):
    """Update the task backend with the estimated percent complete and
    seconds remaining.

    Updates are throttled: nothing is written unless at least
    ``update_frequency`` items have completed since the last update.
    """
    if completed_count - self._last_update_count < update_frequency:
        # too soon since the last update; skip to limit backend traffic
        return
    progress_percent, time_remaining = self.calc_progress(
        completed_count, total_count)
    self.logger.debug(
        "Updating progress: %s percent, %s remaining",
        progress_percent, time_remaining)
    if self.request.id:
        # only record state when running under a real task request
        self._last_update_count = completed_count
        self.update_state(None, PROGRESS, {
            "progress_percent": progress_percent,
            "time_remaining": time_remaining,
        })
Update the task backend with both an estimated percentage complete and number of seconds remaining until completion .
53,575
def _validate_required_class_vars ( self ) : required_members = ( 'significant_kwargs' , 'herd_avoidance_timeout' , ) for required_member in required_members : if not hasattr ( self , required_member ) : raise Exception ( "JobtasticTask's must define a %s" % required_member )
Ensure that this subclass has defined all of the required class variables .
53,576
def on_success(self, retval, task_id, args, kwargs):
    """Store results in the backend even when running eagerly.

    This ensures delay_or_run calls always at least have results recorded.
    """
    if self.request.is_eager:
        self.update_state(task_id, SUCCESS, retval)
Store results in the backend even if we're always eager. This ensures the delay_or_run calls always at least have results.
53,577
def _get_cache ( self ) : if not self . _cache : self . _cache = get_cache ( self . app ) return self . _cache
Return the cache to use for thundering herd protection etc .
53,578
def _get_cache_key ( self , ** kwargs ) : m = md5 ( ) for significant_kwarg in self . significant_kwargs : key , to_str = significant_kwarg try : m . update ( to_str ( kwargs [ key ] ) ) except ( TypeError , UnicodeEncodeError ) : m . update ( to_str ( kwargs [ key ] ) . encode ( 'utf-8' ) ) if hasattr ( self , 'cache_prefix' ) : cache_prefix = self . cache_prefix else : cache_prefix = '%s.%s' % ( self . __module__ , self . __name__ ) return '%s:%s' % ( cache_prefix , m . hexdigest ( ) )
Take this task's configured significant_kwargs and build a hash that all equivalent task calls will match.
53,579
def get_cache(app):
    """Attempt to find a valid cache from the Celery configuration.

    Preference order: an explicit BaseCache instance in JOBTASTIC_CACHE,
    then a Django cache (named or 'default'), then a Werkzeug cache built
    from the setting URL or the app's result backend.
    Raises RuntimeError when no suitable cache can be found.
    """
    jobtastic_cache_setting = app.conf.get('JOBTASTIC_CACHE')
    if isinstance(jobtastic_cache_setting, BaseCache):
        return jobtastic_cache_setting
    if 'Django' in CACHES:
        if jobtastic_cache_setting:
            try:
                return WrappedCache(get_django_cache(jobtastic_cache_setting))
            except InvalidCacheBackendError:
                # named Django cache not configured; try Werkzeug below
                pass
        else:
            return WrappedCache(get_django_cache('default'))
    if 'Werkzeug' in CACHES:
        if jobtastic_cache_setting:
            backend, url = get_backend_by_url(jobtastic_cache_setting)
            backend = backend(app=app, url=url)
        else:
            backend = app.backend
        if isinstance(backend, CacheBackend):
            return WrappedCache(MemcachedCache(backend.client))
        elif isinstance(backend, RedisBackend):
            return WrappedCache(RedisCache(backend.client))
    raise RuntimeError('Cannot find a suitable cache for Jobtastic')
Attempt to find a valid cache from the Celery configuration
53,580
def select(*args):
    """Select specific columns from a DataFrame.

    Grouping columns are always retained: any grouping column the caller
    did not explicitly select is prepended to the selection.
    """
    def selector(df):
        columns = [column._name for column in args]
        if df._grouped_on:
            # walk the grouping columns in reverse so insert(0, ...)
            # preserves their original order at the front
            for col in df._grouped_on[::-1]:
                if col not in columns:
                    columns.insert(0, col)
        return df[columns]
    return selector
Select specific columns from DataFrame .
53,581
def arrange(*args):
    """Sort the DataFrame by the given column arguments."""
    def sorter(df):
        # evaluate the sort expressions as temporary columns, compute the
        # sorted index, then reindex the original frame in that order
        keyed = df >> mutate(*args)
        order = keyed.sort_values([str(arg) for arg in args]).index
        return df.loc[order]
    return sorter
Sort DataFrame by the input column arguments .
53,582
def rename(**kwargs):
    """Rename one or more columns, leaving other columns unchanged.

    Keyword names are the new column names; keyword values are column
    objects referring to the old names.
    """
    def renamer(df):
        mapping = {old._name: new for new, old in kwargs.items()}
        return df.rename(columns=mapping)
    return renamer
Rename one or more columns leaving other columns unchanged
53,583
def transmute(*args, **kwargs):
    """Like select, but allows mutation in the column definitions: only
    the defined columns are kept."""
    mutator = mutate(*args, **dict(kwargs))
    names = [str(arg) for arg in args]
    names += [name for name, _ in _dict_to_possibly_ordered_tuples(kwargs)]
    return lambda df: mutator(df)[names]
Similar to select but allows mutation in column definitions .
53,584
def get_join_cols(by_entry):
    """Build the left/right column-name lists for a join specification.

    Each entry is either a single column name (same on both sides) or a
    (left, right) pair.
    """
    pairs = [
        (col, col) if isinstance(col, str) else (col[0], col[1])
        for col in by_entry
    ]
    left_cols = [left for left, _ in pairs]
    right_cols = [right for _, right in pairs]
    return left_cols, right_cols
Helper function used for joins: builds the left and right column lists used by the join function.
53,585
def mutating_join(*args, **kwargs):
    "generic function for mutating dplyr-style joins"
    left = args[0]
    right = args[1]
    if 'by' in kwargs:
        left_cols, right_cols = get_join_cols(kwargs['by'])
    else:
        # no 'by': let pandas join on the shared columns
        left_cols, right_cols = None, None
    if 'suffixes' in kwargs:
        dsuffixes = kwargs['suffixes']
    else:
        # pandas' default overlapping-column suffixes
        dsuffixes = ('_x', '_y')
    if left._grouped_on:
        # merging drops grouping: ungroup, merge, then regroup on the
        # original grouping columns
        outDf = (DplyFrame((left >> ungroup()).merge(
            right, how=kwargs['how'], left_on=left_cols,
            right_on=right_cols, suffixes=dsuffixes))
            .regroup(left._grouped_on))
    else:
        outDf = DplyFrame(left.merge(
            right, how=kwargs['how'], left_on=left_cols,
            right_on=right_cols, suffixes=dsuffixes))
    return outDf
generic function for mutating dplyr - style joins
53,586
def _chart_support(self, name, data, caller, **kwargs):
    """Template chart support function.

    Builds the HTML/JS snippet for a chart: assigns a unique DOM id,
    resolves per-chart library options from the loaded library and merges
    them with the caller's keyword options.
    """
    id = 'chart-%s' % next(self.id)  # unique DOM id for this chart
    name = self._chart_class_name(name)
    options = dict(self.environment.options)
    options.update(name=name, id=id)
    # jinja2 2.9 changed the prefix length of caller-passed kwargs.
    # bug fix: the previous check (jinja2.__version__ >= '2.9') compared
    # version strings lexicographically, so '2.10' counted as older than
    # '2.9' and took the wrong branch
    jinja2_version = tuple(int(p) for p in jinja2.__version__.split('.')[:2])
    if jinja2_version >= (2, 9):
        kwargs = dict((k[4:], v) for (k, v) in kwargs.items())
    else:
        kwargs = dict((k[2:], v) for (k, v) in kwargs.items())
    if self._library is None:
        self._library = self.load_library()
    # per-element options are keyed by the user-supplied element id,
    # distinct from the generated DOM id stored in `options` above
    element_id = kwargs.get('id', '')
    library = self._library.get(element_id, {})
    library.update(kwargs.get('library', {}))
    kwargs.update(library=library)
    options.update(kwargs)
    return CHART_HTML.format(data=data, options=json.dumps(kwargs), **options)
template chart support function
53,587
def load_library(self):
    """Load chart configuration options from the 'chartkick.json' template.

    Returns:
        An Options instance populated from the template file, or an
        empty dict when the template does not exist.
    """
    try:
        filename = self.environment.get_template('chartkick.json').filename
    except TemplateNotFound:
        return {}
    options = Options()
    options.load(filename)
    return options
loads configuration options
53,588
def js():
    """Return the directory holding the bundled JavaScript assets."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'js')
returns home directory of js
53,589
def parse_options(source):
    """Parse chart tag options of the form ``name=value name2=value2``.

    Values may contain spaces; the word immediately before each '=' is
    taken as the following option's name.

    Returns:
        dict mapping option names to their string values.
    """
    options = {}
    tokens = [piece.strip() for piece in source.split('=')]
    name = tokens[0]
    for middle in tokens[1:-1]:
        # Everything up to the last space belongs to the current option's
        # value; the trailing word is the next option's name.
        value, name_after = middle.rsplit(' ', 1)
        options[name.strip()] = value
        name = name_after
    options[name.strip()] = tokens[-1].strip()
    return options
parses chart tag options
53,590
def copy(self):
    """Return a copy of this RigidTransform.

    The rotation and translation arrays are copied; the frame names are
    shared (strings are immutable).
    """
    rotation_copy = np.copy(self.rotation)
    translation_copy = np.copy(self.translation)
    return RigidTransform(rotation_copy, translation_copy,
                          self.from_frame, self.to_frame)
Returns a copy of the RigidTransform .
53,591
def _check_valid_rotation ( self , rotation ) : if not isinstance ( rotation , np . ndarray ) or not np . issubdtype ( rotation . dtype , np . number ) : raise ValueError ( 'Rotation must be specified as numeric numpy array' ) if len ( rotation . shape ) != 2 or rotation . shape [ 0 ] != 3 or rotation . shape [ 1 ] != 3 : raise ValueError ( 'Rotation must be specified as a 3x3 ndarray' ) if np . abs ( np . linalg . det ( rotation ) - 1.0 ) > 1e-3 : raise ValueError ( 'Illegal rotation. Must have determinant == 1.0' )
Checks that the given rotation matrix is valid .
53,592
def _check_valid_translation ( self , translation ) : if not isinstance ( translation , np . ndarray ) or not np . issubdtype ( translation . dtype , np . number ) : raise ValueError ( 'Translation must be specified as numeric numpy array' ) t = translation . squeeze ( ) if len ( t . shape ) != 1 or t . shape [ 0 ] != 3 : raise ValueError ( 'Translation must be specified as a 3-vector, 3x1 ndarray, or 1x3 ndarray' )
Checks that the translation vector is valid .
53,593
def interpolate_with(self, other_tf, t):
    """Interpolate between this transform and ``other_tf``.

    Translation is interpolated linearly and rotation via quaternion
    slerp, at parameter t in [0, 1] (0 = self, 1 = other_tf).

    Raises:
        ValueError: if t lies outside [0, 1].
    """
    if not 0 <= t <= 1:
        raise ValueError('Must interpolate between 0 and 1')
    blended_translation = (1.0 - t) * self.translation + t * other_tf.translation
    # NOTE(review): slerp yields a quaternion, which is passed as the
    # rotation argument — RigidTransform apparently accepts quaternions
    # there; confirm against the constructor.
    blended_rotation = transformations.quaternion_slerp(
        self.quaternion, other_tf.quaternion, t)
    return RigidTransform(rotation=blended_rotation,
                          translation=blended_translation,
                          from_frame=self.from_frame,
                          to_frame=self.to_frame)
Interpolate with another rigid transformation .
53,594
def linear_trajectory_to(self, target_tf, traj_len):
    """Create a pose trajectory linearly interpolated to ``target_tf``.

    Args:
        target_tf: goal RigidTransform.
        traj_len: number of interpolation steps (>= 0).

    Returns:
        list of traj_len + 2 poses: interpolated waypoints starting at
        this pose (t = 0), followed by ``target_tf`` itself.

    Raises:
        ValueError: if traj_len is negative.
    """
    if traj_len < 0:
        raise ValueError('Traj len must at least 0')
    delta_t = 1.0 / (traj_len + 1)
    # BUG FIX: the original accumulated `t += delta_t` in a while-loop,
    # so floating-point drift could produce one waypoint too many or too
    # few. Indexing i * delta_t gives a deterministic count.
    traj = [self.interpolate_with(target_tf, i * delta_t)
            for i in range(traj_len + 1)]
    traj.append(target_tf)
    return traj
Creates a trajectory of poses linearly interpolated from this tf to a target tf .
53,595
def apply(self, points):
    """Apply this rigid transformation to a set of 3D objects.

    Args:
        points: a BagOfPoints subclass (Point, PointCloud, Direction,
            NormalCloud, or a BagOfVectors) of dimension 3, expressed in
            this transform's from_frame.

    Returns:
        The transformed object, re-wrapped in the same concrete type,
        in this transform's to_frame.

    Raises:
        ValueError: for non-BagOfPoints input, wrong dimension, a frame
            mismatch, or an unsupported BagOfPoints subclass.
    """
    if not isinstance(points, BagOfPoints):
        raise ValueError('Rigid transformations can only be applied to bags of points')
    if points.dim != 3:
        raise ValueError('Rigid transformations can only be applied to 3-dimensional points')
    if points.frame != self._from_frame:
        raise ValueError('Cannot transform points in frame %s with rigid transformation from frame %s to frame %s' % (points.frame, self._from_frame, self._to_frame))
    if isinstance(points, BagOfVectors):
        # Free vectors (directions/normals) are rotation-only: the
        # translation component does not apply to them.
        x = points.data
        x_tf = self.rotation.dot(x)
    else:
        # Points: lift to homogeneous coordinates so the full transform
        # matrix (rotation + translation) applies in a single multiply.
        x = points.data
        if len(x.shape) == 1:
            # Promote a single 3-vector to a 3x1 column.
            x = x[:, np.newaxis]
        x_homog = np.r_[x, np.ones([1, points.num_points])]
        x_homog_tf = self.matrix.dot(x_homog)
        x_tf = x_homog_tf[0:3, :]
    # Re-wrap in the concrete input type. NOTE(review): the dispatch
    # order matters if these classes subclass one another — confirm
    # against the class hierarchy before reordering.
    if isinstance(points, PointCloud):
        return PointCloud(x_tf, frame=self._to_frame)
    elif isinstance(points, Point):
        return Point(x_tf, frame=self._to_frame)
    elif isinstance(points, Direction):
        return Direction(x_tf, frame=self._to_frame)
    elif isinstance(points, NormalCloud):
        return NormalCloud(x_tf, frame=self._to_frame)
    raise ValueError('Type %s not yet supported' % (type(points)))
Applies the rigid transformation to a set of 3D objects .
53,596
def dot(self, other_tf):
    """Compose this rigid transform with another (self following other_tf).

    The result maps from ``other_tf.from_frame`` to this transform's
    to_frame.

    Raises:
        ValueError: if other_tf.to_frame does not match self.from_frame.
    """
    if other_tf.to_frame != self.from_frame:
        raise ValueError('To frame of right hand side ({0}) must match from frame of left hand side ({1})'.format(other_tf.to_frame, self.from_frame))
    # Compose via 4x4 homogeneous matrices, then split back into R, t.
    pose_tf = self.matrix.dot(other_tf.matrix)
    rotation, translation = RigidTransform.rotation_and_translation_from_matrix(pose_tf)
    if isinstance(other_tf, SimilarityTransform):
        # NOTE(review): for similarity transforms the composition is
        # delegated to SimilarityTransform.__mul__ through a scale-1
        # wrapper; the pose_tf computed above is discarded on this path.
        # Confirm this matches SimilarityTransform's multiplication
        # semantics.
        return SimilarityTransform(self.rotation, self.translation, scale=1.0, from_frame=self.from_frame, to_frame=self.to_frame) * other_tf
    return RigidTransform(rotation, translation, from_frame=other_tf.from_frame, to_frame=self.to_frame)
Compose this rigid transform with another .
53,597
def inverse(self):
    """Return the inverse transform (maps to_frame back to from_frame).

    For a rigid transform [R | t] the inverse is [R^T | -R^T t].
    """
    rot_inv = self.rotation.T
    trans_inv = -rot_inv.dot(self.translation)
    return RigidTransform(rot_inv, trans_inv,
                          from_frame=self._to_frame,
                          to_frame=self._from_frame)
Take the inverse of the rigid transform .
53,598
def save(self, filename):
    """Save the RigidTransform to a text file.

    Format: from_frame line, to_frame line, translation line, then the
    three rows of the rotation matrix.

    Args:
        filename: destination path; must end in TF_EXTENSION.

    Raises:
        ValueError: if the file extension is not TF_EXTENSION.
    """
    file_root, file_ext = os.path.splitext(filename)
    if file_ext.lower() != TF_EXTENSION:
        raise ValueError('Extension %s not supported for RigidTransform. Must be stored with extension %s' % (file_ext, TF_EXTENSION))
    # BUG FIX: use a context manager so the handle is closed even when a
    # write raises (the original open/close pair leaked it on error).
    with open(filename, 'w') as f:
        f.write('%s\n' % (self._from_frame))
        f.write('%s\n' % (self._to_frame))
        f.write('%f %f %f\n' % (self._translation[0], self._translation[1], self._translation[2]))
        f.write('%f %f %f\n' % (self._rotation[0, 0], self._rotation[0, 1], self._rotation[0, 2]))
        f.write('%f %f %f\n' % (self._rotation[1, 0], self._rotation[1, 1], self._rotation[1, 2]))
        f.write('%f %f %f\n' % (self._rotation[2, 0], self._rotation[2, 1], self._rotation[2, 2]))
Save the RigidTransform to a file .
53,599
def as_frames(self, from_frame, to_frame='world'):
    """Return a shallow copy of this transform with the frames renamed.

    The rotation and translation arrays are shared with the original,
    not copied.
    """
    return RigidTransform(self.rotation, self.translation,
                          from_frame, to_frame)
Return a shallow copy of this rigid transform with just the frames changed .