idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
7,100 | def set_cell ( self , i , j , value ) : bool_tests = [ value in self . _possibles [ i ] [ j ] , value in self . _poss_rows [ i ] , value in self . _poss_cols [ j ] , value in self . _poss_box [ ( i // self . order ) * self . order + ( j // self . order ) ] , value not in self . row ( i ) , value not in self . col ( j ) , value not in self . box ( i , j ) ] if all ( bool_tests ) : self [ i ] [ j ] = value else : raise SudokuHasNoSolutionError ( "This value cannot be set here!" ) | Set a cell s value with a series of safety checks |
7,101 | def solve ( self , verbose = False , allow_brute_force = True ) : while not self . is_solved : self . _update ( ) singles_found = False or self . _fill_naked_singles ( ) or self . _fill_hidden_singles ( ) if not singles_found : if allow_brute_force : solution = None try : dlxs = DancingLinksSolver ( copy . deepcopy ( self . _matrix ) ) solutions = dlxs . solve ( ) solution = next ( solutions ) more_solutions = next ( solutions ) except StopIteration as e : if solution is not None : self . _matrix = solution else : raise SudokuHasNoSolutionError ( "Dancing Links solver could not find any solution." ) except Exception as e : raise SudokuHasNoSolutionError ( "Brute Force method failed." ) else : raise SudokuHasMultipleSolutionsError ( "This Sudoku has multiple solutions!" ) self . solution_steps . append ( "BRUTE FORCE - Dancing Links" ) break else : print ( self ) raise SudokuTooDifficultError ( "This Sudoku requires more advanced methods!" ) if verbose : print ( "Sudoku solved in {0} iterations!\n{1}" . format ( len ( self . solution_steps ) , self ) ) for step in self . solution_steps : print ( step ) | Solve the Sudoku . |
7,102 | def _update ( self ) : for i , ( row , col , box ) in enumerate ( zip ( self . row_iter ( ) , self . col_iter ( ) , self . box_iter ( ) ) ) : self . _poss_rows [ i ] = set ( self . _values ) . difference ( set ( row ) ) self . _poss_cols [ i ] = set ( self . _values ) . difference ( set ( col ) ) self . _poss_box [ i ] = set ( self . _values ) . difference ( set ( box ) ) for i in utils . range_ ( self . side ) : self . _possibles [ i ] = { } for j in utils . range_ ( self . side ) : self . _possibles [ i ] [ j ] = set ( ) if self [ i ] [ j ] > 0 : continue this_box_index = ( ( i // self . order ) * self . order ) + ( j // self . order ) self . _possibles [ i ] [ j ] = self . _poss_rows [ i ] . intersection ( self . _poss_cols [ j ] ) . intersection ( self . _poss_box [ this_box_index ] ) | Calculate remaining values for each row column box and finally cell . |
7,103 | def _fill_naked_singles ( self ) : simple_found = False for i in utils . range_ ( self . side ) : for j in utils . range_ ( self . side ) : if self [ i ] [ j ] > 0 : continue p = self . _possibles [ i ] [ j ] if len ( p ) == 1 : self . set_cell ( i , j , list ( p ) [ 0 ] ) self . solution_steps . append ( self . _format_step ( "NAKED" , ( i , j ) , self [ i ] [ j ] ) ) simple_found = True elif len ( p ) == 0 : raise SudokuHasNoSolutionError ( "Error made! No possible value for ({0},{1})!" . format ( i + 1 , j + 1 ) ) return simple_found | Look for naked singles i . e . cells with ony one possible value . |
7,104 | def _fill_hidden_singles ( self ) : for i in utils . range_ ( self . side ) : box_i = ( i // self . order ) * self . order for j in utils . range_ ( self . side ) : box_j = ( j // self . order ) * self . order if self [ i ] [ j ] > 0 : continue p = self . _possibles [ i ] [ j ] for k in utils . range_ ( self . side ) : if k == j : continue p = p . difference ( self . _possibles [ i ] [ k ] ) if len ( p ) == 1 : self . set_cell ( i , j , p . pop ( ) ) self . solution_steps . append ( self . _format_step ( "HIDDEN-ROW" , ( i , j ) , self [ i ] [ j ] ) ) return True p = self . _possibles [ i ] [ j ] for k in utils . range_ ( self . side ) : if k == i : continue p = p . difference ( self . _possibles [ k ] [ j ] ) if len ( p ) == 1 : self . set_cell ( i , j , p . pop ( ) ) self . solution_steps . append ( self . _format_step ( "HIDDEN-COL" , ( i , j ) , self [ i ] [ j ] ) ) return True p = self . _possibles [ i ] [ j ] for k in utils . range_ ( box_i , box_i + self . order ) : for kk in utils . range_ ( box_j , box_j + self . order ) : if k == i and kk == j : continue p = p . difference ( self . _possibles [ k ] [ kk ] ) if len ( p ) == 1 : self . set_cell ( i , j , p . pop ( ) ) self . solution_steps . append ( self . _format_step ( "HIDDEN-BOX" , ( i , j ) , self [ i ] [ j ] ) ) return True return False | Look for hidden singles i . e . cells with only one unique possible value in row column or box . |
7,105 | def parse ( cls , resource , direction = "children" , ** additional_parameters ) -> "DtsCollection" : data = jsonld . expand ( resource ) if len ( data ) == 0 : raise JsonLdCollectionMissing ( "Missing collection in JSON" ) data = data [ 0 ] obj = cls ( identifier = resource [ "@id" ] , ** additional_parameters ) obj . _parse_metadata ( data ) obj . _parse_members ( data , direction = direction , ** additional_parameters ) return obj | Given a dict representation of a json object generate a DTS Collection |
7,106 | def _filldown ( self , lineno ) : if self . line > lineno : return self . lines . extend ( self . current_context for _ in range ( self . line , lineno ) ) self . line = lineno | Copy current_context into lines down up until lineno |
7,107 | def _add_section ( self , node ) : self . _filldown ( node . lineno ) self . context . append ( node . name ) self . _update_current_context ( ) for _ in map ( self . visit , iter_child_nodes ( node ) ) : pass self . context . pop ( ) self . _update_current_context ( ) | Register the current node as a new context block |
7,108 | def _module_name ( filename ) : absfile = os . path . abspath ( filename ) match = filename for base in [ '' ] + sys . path : base = os . path . abspath ( base ) if absfile . startswith ( base ) : match = absfile [ len ( base ) : ] break return SUFFIX_RE . sub ( '' , match ) . lstrip ( '/' ) . replace ( '/' , '.' ) | Try to find a module name for a file path by stripping off a prefix found in sys . modules . |
7,109 | def from_modulename ( cls , module_name ) : slug = module_name . replace ( '.' , '/' ) paths = [ slug + '.py' , slug + '/__init__.py' ] for base in [ '' ] + sys . path : for path in paths : fullpath = os . path . join ( base , path ) if os . path . exists ( fullpath ) : return cls ( fullpath , prefix = module_name ) else : raise ValueError ( "Module not found: %s" % module_name ) | Build a PythonFile given a dotted module name like a . b . c |
7,110 | def context_range ( self , context ) : if not context . startswith ( self . prefix ) : context = self . prefix + '.' + context lo = hi = None for idx , line_context in enumerate ( self . lines , 1 ) : if line_context . startswith ( context ) : lo = lo or idx hi = idx if lo is None : raise ValueError ( "Context %s does not exist in file %s" % ( context , self . filename ) ) return lo , hi + 1 | Return the 1 - offset right - open range of lines spanned by a particular context name . |
7,111 | def context ( self , line ) : idx = line - 1 if idx >= len ( self . lines ) : return self . prefix return self . lines [ idx ] | Return the context for a given 1 - offset line number . |
7,112 | def write ( label , plist , scope = USER ) : fname = compute_filename ( label , scope ) with open ( fname , "wb" ) as f : plistlib . writePlist ( plist , f ) return fname | Writes the given property list to the appropriate file on disk and returns the absolute filename . |
7,113 | def alphakt_pth ( v , temp , v0 , alpha0 , k0 , n , z , t_ref = 300. , three_r = 3. * constants . R ) : return alpha0 * k0 * ( temp - t_ref ) | calculate thermal pressure from thermal expansion and bulk modulus |
7,114 | def _get_output_nodes ( self , output_path , error_path ) : status = cod_deposition_states . UNKNOWN messages = [ ] if output_path is not None : content = None with open ( output_path ) as f : content = f . read ( ) status , message = CifCodDepositParser . _deposit_result ( content ) messages . extend ( message . split ( '\n' ) ) if error_path is not None : with open ( error_path ) as f : content = f . readlines ( ) lines = [ x . strip ( '\n' ) for x in content ] messages . extend ( lines ) parameters = { 'output_messages' : messages , 'status' : status } output_nodes = [ ] output_nodes . append ( ( 'messages' , Dict ( dict = parameters ) ) ) if status == cod_deposition_states . SUCCESS : return True , output_nodes return False , output_nodes | Extracts output nodes from the standard output and standard error files |
7,115 | def filter_ ( self , columns , value ) : for column in columns : if column not in self . data . columns : raise ValueError ( "Column %s not in DataFrame columns: %s" % ( column , list ( self . data ) ) ) for column in columns : column_len = len ( self . data [ column ] ) if column_len > 0 and column_len != self . data [ column ] . isnull ( ) . sum ( ) : self . data = self . data [ self . data [ column ] != value ] return self . data | This method filter some of the rows where the value is found in each of the columns . |
7,116 | def _check_directory ( directory ) : if directory is not None : if not exists ( directory ) : raise CommandError ( "Cannot run command - directory {0} does not exist" . format ( directory ) ) if not isdir ( directory ) : raise CommandError ( "Cannot run command - specified directory {0} is not a directory." . format ( directory ) ) | Raise exception if directory does not exist . |
7,117 | def load_tweets ( filename = 'tweets.zip' ) : r basename , ext = os . path . splitext ( filename ) json_file = basename + '.json' json_path = os . path . join ( DATA_PATH , json_file ) zip_path = os . path . join ( DATA_PATH , basename + '.zip' ) if not os . path . isfile ( json_path ) : zf = ZipFile ( zip_path , 'r' ) zf . extract ( json_file , DATA_PATH ) with open ( json_path , 'rUb' ) as f : return json . load ( f ) | r Extract the cached tweets database if necessary and load + parse the json . |
7,118 | def main ( args ) : global logging , log args = parse_args ( args ) logging . basicConfig ( format = LOG_FORMAT , level = logging . DEBUG if args . verbose else logging . INFO , stream = sys . stdout ) df = cat_tweets ( path = args . path , verbosity = args . verbose + 1 , numtweets = args . numtweets , ignore_suspicious = False ) log . info ( 'Combined {} tweets' . format ( len ( df ) ) ) df = drop_nan_columns ( df ) save_tweets ( df , path = args . path , filename = args . tweetfile ) geo = get_geo ( df , path = args . path , filename = args . geofile ) log . info ( "Combined {} tweets into a single file {} and set asside {} geo tweets in {}" . format ( len ( df ) , args . tweetfile , len ( geo ) , args . geofile ) ) return df , geo | API with args object containing configuration parameters |
7,119 | def drop_nan_columns ( df , thresh = 325 ) : if thresh < 1 : thresh = int ( thresh * df ) return df . dropna ( axis = 1 , thresh = thresh , inplace = False ) | Drop columns that are mostly NaNs |
7,120 | def fast_deduplication_backup ( self , old_backup_entry , process_bar ) : src_path = self . dir_path . resolved_path log . debug ( "*** fast deduplication backup: '%s'" , src_path ) old_file_path = old_backup_entry . get_backup_path ( ) if not self . path_helper . abs_dst_path . is_dir ( ) : try : self . path_helper . abs_dst_path . makedirs ( mode = phlb_config . default_new_path_mode ) except OSError as err : raise BackupFileError ( "Error creating out path: %s" % err ) else : assert not self . path_helper . abs_dst_filepath . is_file ( ) , ( "Out file already exists: %r" % self . path_helper . abs_src_filepath ) with self . path_helper . abs_dst_hash_filepath . open ( "w" ) as hash_file : try : old_file_path . link ( self . path_helper . abs_dst_filepath ) except OSError as err : log . error ( "Can't link '%s' to '%s': %s" % ( old_file_path , self . path_helper . abs_dst_filepath , err ) ) log . info ( "Mark %r with 'no link source'." , old_backup_entry ) old_backup_entry . no_link_source = True old_backup_entry . save ( ) self . deduplication_backup ( process_bar ) return hash_hexdigest = old_backup_entry . content_info . hash_hexdigest hash_file . write ( hash_hexdigest ) file_size = self . dir_path . stat . st_size if file_size > 0 : process_bar . update ( file_size ) BackupEntry . objects . create ( backup_run = self . backup_run , backup_entry_path = self . path_helper . abs_dst_filepath , hash_hexdigest = hash_hexdigest , ) if self . _SIMULATE_SLOW_SPEED : log . error ( "Slow down speed for tests!" ) time . sleep ( self . _SIMULATE_SLOW_SPEED ) self . fast_backup = True self . file_linked = True | We can just link a old backup entry |
7,121 | def deduplication_backup ( self , process_bar ) : self . fast_backup = False src_path = self . dir_path . resolved_path log . debug ( "*** deduplication backup: '%s'" , src_path ) log . debug ( "abs_src_filepath: '%s'" , self . path_helper . abs_src_filepath ) log . debug ( "abs_dst_filepath: '%s'" , self . path_helper . abs_dst_filepath ) log . debug ( "abs_dst_hash_filepath: '%s'" , self . path_helper . abs_dst_hash_filepath ) log . debug ( "abs_dst_dir: '%s'" , self . path_helper . abs_dst_path ) if not self . path_helper . abs_dst_path . is_dir ( ) : try : self . path_helper . abs_dst_path . makedirs ( mode = phlb_config . default_new_path_mode ) except OSError as err : raise BackupFileError ( "Error creating out path: %s" % err ) else : assert not self . path_helper . abs_dst_filepath . is_file ( ) , ( "Out file already exists: %r" % self . path_helper . abs_src_filepath ) try : try : with self . path_helper . abs_src_filepath . open ( "rb" ) as in_file : with self . path_helper . abs_dst_hash_filepath . open ( "w" ) as hash_file : with self . path_helper . abs_dst_filepath . open ( "wb" ) as out_file : hash = self . _deduplication_backup ( self . dir_path , in_file , out_file , process_bar ) hash_hexdigest = hash . hexdigest ( ) hash_file . write ( hash_hexdigest ) except OSError as err : raise BackupFileError ( "Skip file %s error: %s" % ( self . path_helper . abs_src_filepath , err ) ) except KeyboardInterrupt : try : self . path_helper . abs_dst_filepath . unlink ( ) except OSError : pass try : self . path_helper . abs_dst_hash_filepath . unlink ( ) except OSError : pass raise KeyboardInterrupt old_backup_entry = deduplicate ( self . path_helper . abs_dst_filepath , hash_hexdigest ) if old_backup_entry is None : log . debug ( "File is unique." ) self . file_linked = False else : log . debug ( "File was deduplicated via hardlink to: %s" % old_backup_entry ) self . file_linked = True atime_ns = self . dir_path . stat . st_atime_ns mtime_ns = self . dir_path . stat . st_mtime_ns self . path_helper . abs_dst_filepath . utime ( ns = ( atime_ns , mtime_ns ) ) log . debug ( "Set mtime to: %s" % mtime_ns ) BackupEntry . objects . create ( backup_run = self . backup_run , backup_entry_path = self . path_helper . abs_dst_filepath , hash_hexdigest = hash_hexdigest , ) self . fast_backup = False | Backup the current file and compare the content . |
7,122 | def _backup_dir_item ( self , dir_path , process_bar ) : self . path_helper . set_src_filepath ( dir_path ) if self . path_helper . abs_src_filepath is None : self . total_errored_items += 1 log . info ( "Can't backup %r" , dir_path ) if dir_path . is_symlink : self . summary ( "TODO Symlink: %s" % dir_path ) return if dir_path . resolve_error is not None : self . summary ( "TODO resolve error: %s" % dir_path . resolve_error ) pprint_path ( dir_path ) return if dir_path . different_path : self . summary ( "TODO different path:" ) pprint_path ( dir_path ) return if dir_path . is_dir : self . summary ( "TODO dir: %s" % dir_path ) elif dir_path . is_file : file_backup = FileBackup ( dir_path , self . path_helper , self . backup_run ) old_backup_entry = self . fast_compare ( dir_path ) if old_backup_entry is not None : file_backup . fast_deduplication_backup ( old_backup_entry , process_bar ) else : file_backup . deduplication_backup ( process_bar ) assert file_backup . fast_backup is not None , dir_path . path assert file_backup . file_linked is not None , dir_path . path file_size = dir_path . stat . st_size if file_backup . file_linked : self . total_file_link_count += 1 self . total_stined_bytes += file_size else : self . total_new_file_count += 1 self . total_new_bytes += file_size if file_backup . fast_backup : self . total_fast_backup += 1 else : self . summary ( "TODO:" % dir_path ) pprint_path ( dir_path ) | Backup one dir item |
7,123 | def print_update ( self ) : print ( "\r\n" ) now = datetime . datetime . now ( ) print ( "Update info: (from: %s)" % now . strftime ( "%c" ) ) current_total_size = self . total_stined_bytes + self . total_new_bytes if self . total_errored_items : print ( " * WARNING: %i omitted files!" % self . total_errored_items ) print ( " * fast backup: %i files" % self . total_fast_backup ) print ( " * new content saved: %i files (%s %.1f%%)" % ( self . total_new_file_count , human_filesize ( self . total_new_bytes ) , to_percent ( self . total_new_bytes , current_total_size ) , ) ) print ( " * stint space via hardlinks: %i files (%s %.1f%%)" % ( self . total_file_link_count , human_filesize ( self . total_stined_bytes ) , to_percent ( self . total_stined_bytes , current_total_size ) , ) ) duration = default_timer ( ) - self . start_time performance = current_total_size / duration / 1024.0 / 1024.0 print ( " * present performance: %.1fMB/s\n" % performance ) | print some status information in between . |
7,124 | def cli ( code , cif , parameters , daemon ) : from aiida import orm from aiida . plugins import factories from aiida_codtools . common . cli import CliParameters , CliRunner from aiida_codtools . common . resources import get_default_options process = factories . CalculationFactory ( code . get_attribute ( 'input_plugin' ) ) parameters = CliParameters . from_string ( parameters ) . get_dictionary ( ) inputs = { 'cif' : cif , 'code' : code , 'metadata' : { 'options' : get_default_options ( ) } } if parameters : inputs [ 'parameters' ] = orm . Dict ( dict = parameters ) cli_runner = CliRunner ( process , inputs ) cli_runner . run ( daemon = daemon ) | Run any cod - tools calculation for the given CifData node . |
7,125 | def make ( parser ) : s = parser . add_subparsers ( title = 'commands' , metavar = 'COMMAND' , help = 'description' , ) def gen_pass_f ( args ) : gen_pass ( ) gen_pass_parser = s . add_parser ( 'gen-pass' , help = 'generate the password' ) gen_pass_parser . set_defaults ( func = gen_pass_f ) def cmd_f ( args ) : cmd ( args . user , args . hosts . split ( ',' ) , args . key_filename , args . password , args . run ) cmd_parser = s . add_parser ( 'cmd' , help = 'run command line on the target host' ) cmd_parser . add_argument ( '--run' , help = 'the command running on the remote node' , action = 'store' , default = None , dest = 'run' ) cmd_parser . set_defaults ( func = cmd_f ) | DEPRECATED prepare OpenStack basic environment |
7,126 | def pw_converter ( handler , flt ) : import peewee as pw if isinstance ( flt , Filter ) : return flt model = handler . model field = getattr ( model , flt ) if isinstance ( field , pw . BooleanField ) : return PWBoolFilter ( flt ) if field . choices : choices = [ ( Filter . default , '---' ) ] + list ( field . choices ) return PWChoiceFilter ( flt , choices = choices ) return PWFilter ( flt ) | Convert column name to filter . |
7,127 | def process ( self , * args , ** kwargs ) : super ( RawIDField , self ) . process ( * args , ** kwargs ) if self . object_data : self . description = self . description or str ( self . object_data ) | Get a description . |
7,128 | def _value ( self ) : if self . data is not None : value = self . data . _data . get ( self . field . to_field . name ) return str ( value ) return '' | Get field value . |
7,129 | def sort ( self , request , reverse = False ) : field = self . model . _meta . fields . get ( self . columns_sort ) if not field : return self . collection if reverse : field = field . desc ( ) return self . collection . order_by ( field ) | Sort current collection . |
7,130 | def value ( self , data ) : value = data . get ( self . name ) if value : return int ( value ) return self . default | Get value from data . |
7,131 | def get_fields ( node , fields_tag = "field_list" ) : fields_nodes = [ c for c in node . children if c . tagname == fields_tag ] if len ( fields_nodes ) == 0 : return { } assert len ( fields_nodes ) == 1 , "multiple nodes with tag " + fields_tag fields_node = fields_nodes [ 0 ] fields = [ { f . tagname : f . rawsource . strip ( ) for f in n . children } for n in fields_node . children if n . tagname == "field" ] return { f [ "field_name" ] : f [ "field_body" ] for f in fields } | Get the field names and their values from a node . |
7,132 | def extract_signature ( docstring ) : root = publish_doctree ( docstring , settings_overrides = { "report_level" : 5 } ) fields = get_fields ( root ) return fields . get ( SIG_FIELD ) | Extract the signature from a docstring . |
7,133 | def split_parameter_types ( parameters ) : if parameters == "" : return [ ] commas = [ ] bracket_depth = 0 for i , char in enumerate ( parameters ) : if ( char == "," ) and ( bracket_depth == 0 ) : commas . append ( i ) elif char == "[" : bracket_depth += 1 elif char == "]" : bracket_depth -= 1 types = [ ] last_i = 0 for i in commas : types . append ( parameters [ last_i : i ] . strip ( ) ) last_i = i + 1 else : types . append ( parameters [ last_i : ] . strip ( ) ) return types | Split a parameter types declaration into individual types . |
7,134 | def parse_signature ( signature ) : if " -> " not in signature : param_types , return_type = None , signature . strip ( ) else : lhs , return_type = [ s . strip ( ) for s in signature . split ( " -> " ) ] csv = lhs [ 1 : - 1 ] . strip ( ) param_types = split_parameter_types ( csv ) requires = set ( _RE_QUALIFIED_TYPES . findall ( signature ) ) return param_types , return_type , requires | Parse a signature into its input and return parameter types . |
7,135 | def get_aliases ( lines ) : aliases = { } for line in lines : line = line . strip ( ) if len ( line ) > 0 and line . startswith ( SIG_ALIAS ) : _ , content = line . split ( SIG_ALIAS ) alias , signature = [ t . strip ( ) for t in content . split ( "=" ) ] aliases [ alias ] = signature return aliases | Get the type aliases in the source . |
7,136 | def get_stub ( source , generic = False ) : generator = StubGenerator ( source , generic = generic ) stub = generator . generate_stub ( ) return stub | Get the stub code for a source code . |
7,137 | def get_mod_paths ( mod_name , out_dir ) : paths = [ ] try : mod = get_loader ( mod_name ) source = Path ( mod . path ) if source . name . endswith ( ".py" ) : source_rel = Path ( * mod_name . split ( "." ) ) if source . name == "__init__.py" : source_rel = source_rel . joinpath ( "__init__.py" ) destination = Path ( out_dir , source_rel . with_suffix ( ".pyi" ) ) paths . append ( ( source , destination ) ) except Exception as e : _logger . debug ( e ) _logger . warning ( "cannot handle module, skipping: %s" , mod_name ) return paths | Get source and stub paths for a module . |
7,138 | def get_pkg_paths ( pkg_name , out_dir ) : paths = [ ] try : pkg = import_module ( pkg_name ) if not hasattr ( pkg , "__path__" ) : return get_mod_paths ( pkg_name , out_dir ) for mod_info in walk_packages ( pkg . __path__ , pkg . __name__ + "." ) : mod_paths = get_mod_paths ( mod_info . name , out_dir ) paths . extend ( mod_paths ) except Exception as e : _logger . debug ( e ) _logger . warning ( "cannot handle package, skipping: %s" , pkg_name ) return paths | Recursively get all source and stub paths for a package . |
7,139 | def process_docstring ( app , what , name , obj , options , lines ) : aliases = getattr ( app , "_sigaliases" , None ) if aliases is None : if what == "module" : aliases = get_aliases ( inspect . getsource ( obj ) . splitlines ( ) ) app . _sigaliases = aliases sig_marker = ":" + SIG_FIELD + ":" is_class = what in ( "class" , "exception" ) signature = extract_signature ( "\n" . join ( lines ) ) if signature is None : if not is_class : return init_method = getattr ( obj , "__init__" ) init_doc = init_method . __doc__ init_lines = init_doc . splitlines ( ) [ 1 : ] if len ( init_lines ) > 1 : init_doc = textwrap . dedent ( "\n" . join ( init_lines [ 1 : ] ) ) init_lines = init_doc . splitlines ( ) if sig_marker not in init_doc : return sig_started = False for line in init_lines : if line . lstrip ( ) . startswith ( sig_marker ) : sig_started = True if sig_started : lines . append ( line ) signature = extract_signature ( "\n" . join ( lines ) ) if is_class : obj = init_method param_types , rtype , _ = parse_signature ( signature ) param_names = [ p for p in inspect . signature ( obj ) . parameters ] if is_class and ( param_names [ 0 ] == "self" ) : del param_names [ 0 ] if len ( param_names ) == len ( param_types ) : for name , type_ in zip ( param_names , param_types ) : find = ":param %(name)s:" % { "name" : name } alias = aliases . get ( type_ ) if alias is not None : type_ = "*%(type)s* :sup:`%(alias)s`" % { "type" : type_ , "alias" : alias } for i , line in enumerate ( lines ) : if line . startswith ( find ) : lines . insert ( i , ":type %(name)s: %(type)s" % { "name" : name , "type" : type_ } ) break if not is_class : for i , line in enumerate ( lines ) : if line . startswith ( ( ":return:" , ":returns:" ) ) : lines . insert ( i , ":rtype: " + rtype ) break sig_start = 0 while sig_start < len ( lines ) : if lines [ sig_start ] . startswith ( sig_marker ) : break sig_start += 1 sig_end = sig_start + 1 while sig_end < len ( lines ) : if ( not lines [ sig_end ] ) or ( lines [ sig_end ] [ 0 ] != " " ) : break sig_end += 1 for i in reversed ( range ( sig_start , sig_end ) ) : del lines [ i ] | Modify the docstring before generating documentation . |
7,140 | def main ( argv = None ) : parser = ArgumentParser ( prog = "pygenstub" ) parser . add_argument ( "--version" , action = "version" , version = "%(prog)s " + __version__ ) parser . add_argument ( "files" , nargs = "*" , help = "generate stubs for given files" ) parser . add_argument ( "-m" , "--module" , action = "append" , metavar = "MODULE" , dest = "modules" , default = [ ] , help = "generate stubs for given modules" , ) parser . add_argument ( "-o" , "--output" , metavar = "PATH" , dest = "out_dir" , help = "change the output directory" ) parser . add_argument ( "--generic" , action = "store_true" , default = False , help = "generate generic stubs" ) parser . add_argument ( "--debug" , action = "store_true" , help = "enable debug messages" ) argv = argv if argv is not None else sys . argv arguments = parser . parse_args ( argv [ 1 : ] ) if arguments . debug : logging . basicConfig ( level = logging . DEBUG ) _logger . debug ( "running in debug mode" ) out_dir = arguments . out_dir if arguments . out_dir is not None else "" if ( out_dir == "" ) and ( len ( arguments . modules ) > 0 ) : print ( "Output directory must be given when generating stubs for modules." ) sys . exit ( 1 ) modules = [ ] for path in arguments . files : paths = Path ( path ) . glob ( "**/*.py" ) if Path ( path ) . is_dir ( ) else [ Path ( path ) ] for source in paths : if str ( source ) . startswith ( os . path . pardir ) : source = source . absolute ( ) . resolve ( ) if ( out_dir != "" ) and source . is_absolute ( ) : source = source . relative_to ( source . root ) destination = Path ( out_dir , source . with_suffix ( ".pyi" ) ) modules . append ( ( source , destination ) ) for mod_name in arguments . modules : modules . extend ( get_pkg_paths ( mod_name , out_dir ) ) for source , destination in modules : _logger . info ( "generating stub for %s to path %s" , source , destination ) with source . open ( ) as f : code = f . read ( ) try : stub = get_stub ( code , generic = arguments . generic ) except Exception as e : print ( source , "-" , e , file = sys . stderr ) continue if stub != "" : if not destination . parent . exists ( ) : destination . parent . mkdir ( parents = True ) with destination . open ( "w" ) as f : f . write ( "# " + EDIT_WARNING + "\n\n" + stub ) | Start the command line interface . |
7,141 | def add_variable ( self , node ) : if node . name not in self . variable_names : self . variables . append ( node ) self . variable_names . add ( node . name ) node . parent = self | Add a variable node to this node . |
7,142 | def get_code ( self ) : stub = [ ] for child in self . variables : stub . extend ( child . get_code ( ) ) if ( ( len ( self . variables ) > 0 ) and ( len ( self . children ) > 0 ) and ( not isinstance ( self , ClassNode ) ) ) : stub . append ( "" ) for child in self . children : stub . extend ( child . get_code ( ) ) return stub | Get the stub code for this node . |
7,143 | def get_code ( self ) : stub = [ ] for deco in self . decorators : if ( deco in DECORATORS ) or deco . endswith ( ".setter" ) : stub . append ( "@" + deco ) parameters = [ ] for name , type_ , has_default in self . parameters : decl = "%(n)s%(t)s%(d)s" % { "n" : name , "t" : ": " + type_ if type_ else "" , "d" : " = ..." if has_default else "" , } parameters . append ( decl ) slots = { "a" : "async " if self . _async else "" , "n" : self . name , "p" : ", " . join ( parameters ) , "r" : self . rtype , } prototype = "%(a)sdef %(n)s(%(p)s) -> %(r)s: ..." % slots if len ( prototype ) <= LINE_LENGTH_LIMIT : stub . append ( prototype ) elif len ( INDENT + slots [ "p" ] ) <= LINE_LENGTH_LIMIT : stub . append ( "%(a)sdef %(n)s(" % slots ) stub . append ( INDENT + slots [ "p" ] ) stub . append ( ") -> %(r)s: ..." % slots ) else : stub . append ( "%(a)sdef %(n)s(" % slots ) for param in parameters : stub . append ( INDENT + param + "," ) stub . append ( ") -> %(r)s: ..." % slots ) return stub | Get the stub code for this function . |
7,144 | def get_code ( self ) : stub = [ ] bases = ( "(" + ", " . join ( self . bases ) + ")" ) if len ( self . bases ) > 0 else "" slots = { "n" : self . name , "b" : bases } if ( len ( self . children ) == 0 ) and ( len ( self . variables ) == 0 ) : stub . append ( "class %(n)s%(b)s: ..." % slots ) else : stub . append ( "class %(n)s%(b)s:" % slots ) super_code = super ( ) . get_code ( ) if PY3 else StubNode . get_code ( self ) for line in super_code : stub . append ( INDENT + line ) return stub | Get the stub code for this class . |
7,145 | def collect_aliases ( self ) : self . aliases = get_aliases ( self . _code_lines ) for alias , signature in self . aliases . items ( ) : _ , _ , requires = parse_signature ( signature ) self . required_types |= requires self . defined_types |= { alias } | Collect the type aliases in the source . |
7,146 | def visit_Import ( self , node ) : line = self . _code_lines [ node . lineno - 1 ] module_name = line . split ( "import" ) [ 0 ] . strip ( ) for name in node . names : imported_name = name . name if name . asname : imported_name = name . asname + "::" + imported_name self . imported_namespaces [ imported_name ] = module_name | Visit an import node . |
7,147 | def visit_ImportFrom ( self , node ) : line = self . _code_lines [ node . lineno - 1 ] module_name = line . split ( "from" ) [ 1 ] . split ( "import" ) [ 0 ] . strip ( ) for name in node . names : imported_name = name . name if name . asname : imported_name = name . asname + "::" + imported_name self . imported_names [ imported_name ] = module_name | Visit an from - import node . |
7,148 | def visit_Assign ( self , node ) : line = self . _code_lines [ node . lineno - 1 ] if SIG_COMMENT in line : line = _RE_COMMENT_IN_STRING . sub ( "" , line ) if ( SIG_COMMENT not in line ) and ( not self . generic ) : return if SIG_COMMENT in line : _ , signature = line . split ( SIG_COMMENT ) _ , return_type , requires = parse_signature ( signature ) self . required_types |= requires parent = self . _parents [ - 1 ] for var in node . targets : if isinstance ( var , ast . Name ) : name , p = var . id , parent elif ( isinstance ( var , ast . Attribute ) and isinstance ( var . value , ast . Name ) and ( var . value . id == "self" ) ) : name , p = var . attr , parent . parent else : name , p = None , None if name is not None : if self . generic : return_type = "Any" self . required_types . add ( return_type ) stub_node = VariableNode ( name , return_type ) p . add_variable ( stub_node ) | Visit an assignment node . |
7,149 | def visit_FunctionDef ( self , node ) : node = self . get_function_node ( node ) if node is not None : node . _async = False | Visit a function node . |
7,150 | def visit_AsyncFunctionDef ( self , node ) : node = self . get_function_node ( node ) if node is not None : node . _async = True | Visit an async function node . |
7,151 | def visit_ClassDef ( self , node ) : self . defined_types . add ( node . name ) bases = [ ] for n in node . bases : base_parts = [ ] while True : if not isinstance ( n , ast . Attribute ) : base_parts . append ( n . id ) break else : base_parts . append ( n . attr ) n = n . value bases . append ( "." . join ( base_parts [ : : - 1 ] ) ) self . required_types |= set ( bases ) signature = get_signature ( node ) stub_node = ClassNode ( node . name , bases = bases , signature = signature ) self . _parents [ - 1 ] . add_child ( stub_node ) self . _parents . append ( stub_node ) self . generic_visit ( node ) del self . _parents [ - 1 ] | Visit a class node . |
7,152 | def generate_import_from ( module_ , names ) : regular_names = [ n for n in names if "::" not in n ] as_names = [ n for n in names if "::" in n ] line = "" if len ( regular_names ) > 0 : slots = { "m" : module_ , "n" : ", " . join ( sorted ( regular_names ) ) } line = "from %(m)s import %(n)s" % slots if len ( line ) > LINE_LENGTH_LIMIT : slots [ "n" ] = INDENT + ( ",\n" + INDENT ) . join ( sorted ( regular_names ) ) + "," line = "from %(m)s import (\n%(n)s\n)" % slots if len ( as_names ) > 0 : line += "\n" for as_name in as_names : a , n = as_name . split ( "::" ) line += "from %(m)s import %(n)s as %(a)s" % { "m" : module_ , "n" : n , "a" : a } return line | Generate an import line . |
7,153 | def has_csv_permission ( self , request , obj = None ) : if getattr ( settings , 'DJANGO_EXPORTS_REQUIRE_PERM' , None ) : opts = self . opts codename = '%s_%s' % ( 'csv' , opts . object_name . lower ( ) ) return request . user . has_perm ( "%s.%s" % ( opts . app_label , codename ) ) return True | Returns True if the given request has permission to add an object . Can be overridden by the user in subclasses . By default we assume all staff users can use this action unless DJANGO_EXPORTS_REQUIRE_PERM is set to True in your django settings . |
7,154 | def assoc ( self , key , value ) : copydict = ImmutableDict ( ) copydict . tree = self . tree . assoc ( hash ( key ) , ( key , value ) ) copydict . _length = self . _length + 1 return copydict | Returns a new ImmutableDict instance with value associated with key . The implicit parameter is not modified . |
7,155 | def update ( self , other = None , ** kwargs ) : copydict = ImmutableDict ( ) if other : vallist = [ ( hash ( key ) , ( key , other [ key ] ) ) for key in other ] else : vallist = [ ] if kwargs : vallist += [ ( hash ( key ) , ( key , kwargs [ key ] ) ) for key in kwargs ] copydict . tree = self . tree . multi_assoc ( vallist ) copydict . _length = iter_length ( copydict . tree ) return copydict | Takes the same arguments as the update method in the builtin dict class . However this version returns a new ImmutableDict instead of modifying in - place . |
7,156 | def remove ( self , key ) : copydict = ImmutableDict ( ) copydict . tree = self . tree . remove ( hash ( key ) ) copydict . _length = self . _length - 1 return copydict | Returns a new ImmutableDict with the given key removed . |
7,157 | def _load_config ( self ) : "Load and parse config file, pass options to livestreamer" config = SafeConfigParser ( ) config_file = os . path . join ( self . config_path , 'settings.ini' ) config . read ( config_file ) for option , type in list ( AVAILABLE_OPTIONS . items ( ) ) : if config . has_option ( 'DEFAULT' , option ) : if type == 'int' : value = config . getint ( 'DEFAULT' , option ) if type == 'float' : value = config . getfloat ( 'DEFAULT' , option ) if type == 'bool' : value = config . getboolean ( 'DEFAULT' , option ) if type == 'str' : value = config . get ( 'DEFAULT' , option ) self . livestreamer . set_option ( option , value ) | Load and parse config file pass options to livestreamer |
7,158 | def urn ( self , value : Union [ URN , str ] ) : if isinstance ( value , str ) : value = URN ( value ) elif not isinstance ( value , URN ) : raise TypeError ( "New urn must be string or {} instead of {}" . format ( type ( URN ) , type ( value ) ) ) self . _urn = value | Set the urn |
7,159 | def get_cts_metadata ( self , key : str , lang : str = None ) -> Literal : return self . metadata . get_single ( RDF_NAMESPACES . CTS . term ( key ) , lang ) | Get easily a metadata from the CTS namespace |
7,160 | def set_metadata_from_collection ( self , text_metadata : CtsTextMetadata ) : edition , work , textgroup = tuple ( ( [ text_metadata ] + text_metadata . parents ) [ : 3 ] ) for node in textgroup . metadata . get ( RDF_NAMESPACES . CTS . groupname ) : lang = node . language self . metadata . add ( RDF_NAMESPACES . CTS . groupname , lang = lang , value = str ( node ) ) self . set_creator ( str ( node ) , lang ) for node in work . metadata . get ( RDF_NAMESPACES . CTS . title ) : lang = node . language self . metadata . add ( RDF_NAMESPACES . CTS . title , lang = lang , value = str ( node ) ) self . set_title ( str ( node ) , lang ) for node in edition . metadata . get ( RDF_NAMESPACES . CTS . label ) : lang = node . language self . metadata . add ( RDF_NAMESPACES . CTS . label , lang = lang , value = str ( node ) ) self . set_subject ( str ( node ) , lang ) for node in edition . metadata . get ( RDF_NAMESPACES . CTS . description ) : lang = node . language self . metadata . add ( RDF_NAMESPACES . CTS . description , lang = lang , value = str ( node ) ) self . set_description ( str ( node ) , lang ) if not self . citation . is_set ( ) and edition . citation . is_set ( ) : self . citation = edition . citation | Set the object metadata using its collections recursively |
7,161 | def create_datapoint ( value , timestamp = None , ** tags ) : if timestamp is None : timestamp = time_millis ( ) if type ( timestamp ) is datetime : timestamp = datetime_to_time_millis ( timestamp ) item = { 'timestamp' : timestamp , 'value' : value } if tags is not None : item [ 'tags' ] = tags return item | Creates a single datapoint dict with a value timestamp and tags . |
7,162 | def create_metric ( metric_type , metric_id , data ) : if not isinstance ( data , list ) : data = [ data ] return { 'type' : metric_type , 'id' : metric_id , 'data' : data } | Create Hawkular - Metrics submittable structure . |
7,163 | def put ( self , data ) : if not isinstance ( data , list ) : data = [ data ] r = collections . defaultdict ( list ) for d in data : metric_type = d . pop ( 'type' , None ) if metric_type is None : raise HawkularError ( 'Undefined MetricType' ) r [ metric_type ] . append ( d ) for l in r : self . _post ( self . _get_metrics_raw_url ( self . _get_url ( l ) ) , r [ l ] , parse_json = False ) | Send multiple different metric_ids to the server in a single batch . Metrics can be a mixture of types . |
7,164 | def push ( self , metric_type , metric_id , value , timestamp = None ) : if type ( timestamp ) is datetime : timestamp = datetime_to_time_millis ( timestamp ) item = create_metric ( metric_type , metric_id , create_datapoint ( value , timestamp ) ) self . put ( item ) | Pushes a single metric_id datapoint combination to the server . |
7,165 | def query_metric ( self , metric_type , metric_id , start = None , end = None , ** query_options ) : if start is not None : if type ( start ) is datetime : query_options [ 'start' ] = datetime_to_time_millis ( start ) else : query_options [ 'start' ] = start if end is not None : if type ( end ) is datetime : query_options [ 'end' ] = datetime_to_time_millis ( end ) else : query_options [ 'end' ] = end return self . _get ( self . _get_metrics_raw_url ( self . _get_metrics_single_url ( metric_type , metric_id ) ) , ** query_options ) | Query for metrics datapoints from the server . |
7,166 | def query_metric_stats ( self , metric_type , metric_id = None , start = None , end = None , bucketDuration = None , ** query_options ) : if start is not None : if type ( start ) is datetime : query_options [ 'start' ] = datetime_to_time_millis ( start ) else : query_options [ 'start' ] = start if end is not None : if type ( end ) is datetime : query_options [ 'end' ] = datetime_to_time_millis ( end ) else : query_options [ 'end' ] = end if bucketDuration is not None : if type ( bucketDuration ) is timedelta : query_options [ 'bucketDuration' ] = timedelta_to_duration ( bucketDuration ) else : query_options [ 'bucketDuration' ] = bucketDuration if metric_id is not None : url = self . _get_metrics_stats_url ( self . _get_metrics_single_url ( metric_type , metric_id ) ) else : if len ( query_options ) < 0 : raise HawkularError ( 'Tags are required when querying without metric_id' ) url = self . _get_metrics_stats_url ( self . _get_url ( metric_type ) ) return self . _get ( url , ** query_options ) | Query for metric aggregates from the server . This is called buckets in the Hawkular - Metrics documentation . |
7,167 | def query_metric_definition ( self , metric_type , metric_id ) : return self . _get ( self . _get_metrics_single_url ( metric_type , metric_id ) ) | Query definition of a single metric id . |
7,168 | def query_metric_definitions ( self , metric_type = None , id_filter = None , ** tags ) : params = { } if id_filter is not None : params [ 'id' ] = id_filter if metric_type is not None : params [ 'type' ] = MetricType . short ( metric_type ) if len ( tags ) > 0 : params [ 'tags' ] = self . _transform_tags ( ** tags ) return self . _get ( self . _get_url ( ) , ** params ) | Query available metric definitions . |
7,169 | def query_tag_values ( self , metric_type = None , ** tags ) : tagql = self . _transform_tags ( ** tags ) return self . _get ( self . _get_metrics_tags_url ( self . _get_url ( metric_type ) ) + '/{}' . format ( tagql ) ) | Query for possible tag values . |
7,170 | def query_metric_tags ( self , metric_type , metric_id ) : definition = self . _get ( self . _get_metrics_tags_url ( self . _get_metrics_single_url ( metric_type , metric_id ) ) ) return definition | Returns a list of tags in the metric definition . |
7,171 | def delete_metric_tags ( self , metric_type , metric_id , ** deleted_tags ) : tags = self . _transform_tags ( ** deleted_tags ) tags_url = self . _get_metrics_tags_url ( self . _get_metrics_single_url ( metric_type , metric_id ) ) + '/{0}' . format ( tags ) self . _delete ( tags_url ) | Delete one or more tags from the metric definition . |
7,172 | def create_tenant ( self , tenant_id , retentions = None ) : item = { 'id' : tenant_id } if retentions is not None : item [ 'retentions' ] = retentions self . _post ( self . _get_tenants_url ( ) , json . dumps ( item , indent = 2 ) ) | Create a tenant . Currently nothing can be set ( to be fixed after the master version of Hawkular - Metrics has fixed implementation . |
7,173 | def get_default_options ( num_machines = 1 , max_wallclock_seconds = 1800 , withmpi = False ) : return { 'resources' : { 'num_machines' : int ( num_machines ) } , 'max_wallclock_seconds' : int ( max_wallclock_seconds ) , 'withmpi' : withmpi , } | Return an instance of the options dictionary with the minimally required parameters for a JobCalculation and set to default values unless overriden |
7,174 | def take_screenshot ( self ) : if not self . failed : return browser = getattr ( world , 'browser' , None ) if not browser : return try : scenario_name = self . scenario . name scenario_index = self . scenario . feature . scenarios . index ( self . scenario ) + 1 except AttributeError : scenario_name = self . background . keyword scenario_index = 0 if self . outline is None : outline_index_str = '' else : outline_index = self . scenario . outlines . index ( self . outline ) + 1 outline_index_str = '_{}' . format ( outline_index ) base_name = FORMAT . format ( feature_file = os . path . relpath ( self . feature . filename ) , scenario_index = scenario_index , scenario_name = scenario_name , outline_index = outline_index_str , ) base_name = re . sub ( r'\W' , '_' , base_name , flags = re . UNICODE ) base_name = os . path . join ( DIRECTORY , base_name ) world . browser . save_screenshot ( '{}.png' . format ( base_name ) ) with open ( '{}.html' . format ( base_name ) , 'w' ) as page_source_file : page_source_file . write ( world . browser . page_source ) | Take a screenshot after a failed step . |
7,175 | def kunc_p ( v , v0 , k0 , k0p , order = 5 ) : return cal_p_kunc ( v , [ v0 , k0 , k0p ] , order = order , uncertainties = isuncertainties ( [ v , v0 , k0 , k0p ] ) ) | calculate Kunc EOS see Dorogokupets 2015 for detail |
7,176 | def cal_p_kunc ( v , k , order = 5 , uncertainties = True ) : v0 = k [ 0 ] k0 = k [ 1 ] k0p = k [ 2 ] x = np . power ( v / v0 , 1. / 3. ) f1 = ( 1. - x ) / ( np . power ( x , order ) ) if uncertainties : f2 = unp . exp ( ( 1.5 * k0p - order + 0.5 ) * ( 1. - x ) ) else : f2 = np . exp ( ( 1.5 * k0p - order + 0.5 ) * ( 1. - x ) ) p = 3. * k0 * f1 * f2 return p | calculate Kunc EOS see Dorogokupets2015 for functional form |
7,177 | def find_files ( path = '' , ext = '' , level = None , typ = list , dirs = False , files = True , verbosity = 0 ) : gen = generate_files ( path , ext = ext , level = level , dirs = dirs , files = files , verbosity = verbosity ) if isinstance ( typ ( ) , collections . Mapping ) : return typ ( ( ff [ 'path' ] , ff ) for ff in gen ) elif typ is not None : return typ ( gen ) else : return gen | Recursively find all files in the indicated directory |
7,178 | def serialize ( self ) : if type ( self . value ) == int : return "i{:X}s" . format ( self . value ) . encode ( 'ascii' ) if type ( self . value ) == str : value = self . value . encode ( 'utf-8' ) return "{:X}:" . format ( len ( value ) ) . encode ( 'ascii' ) + value if type ( self . value ) == bytes : value = base64 . standard_b64encode ( self . value ) return "u{:X}:" . format ( len ( value ) ) . encode ( 'ascii' ) + value if type ( self . value ) == list : items = [ LiveMessageToken ( m ) . serialize ( ) for m in self . value ] return b'l' + b'' . join ( items ) + b's' if type ( self . value ) == dict : items = [ ] for key , value in self . value . items ( ) : items . append ( LiveMessageToken ( str ( key ) ) . serialize ( ) ) items . append ( LiveMessageToken ( value ) . serialize ( ) ) return b'h' + b'' . join ( items ) + b's' raise RuntimeError ( "Unknown type %s" % type ( self . value ) ) | Serialize the token and return it as bytes . |
7,179 | def write ( self , file_or_path , append = False , timeout = 10 ) : if isinstance ( file_or_path , six . string_types ) : if self . coverage : file_or_path = get_smother_filename ( file_or_path , self . coverage . config . parallel ) outfile = Lock ( file_or_path , mode = 'a+' , timeout = timeout , fail_when_locked = False ) else : outfile = noclose ( file_or_path ) with outfile as fh : if append : fh . seek ( 0 ) try : other = Smother . load ( fh ) except ValueError : pass else : self |= other fh . seek ( 0 ) fh . truncate ( ) json . dump ( self . data , fh ) | Write Smother results to a file . |
7,180 | def query_context ( self , regions , file_factory = PythonFile ) : result = set ( ) for region in regions : try : pf = file_factory ( region . filename ) except InvalidPythonFile : continue paths = { os . path . abspath ( region . filename ) , os . path . relpath ( region . filename ) } for test_context , hits in six . iteritems ( self . data ) : if test_context in result : continue for path in paths : if region . intersects ( pf , hits . get ( path , [ ] ) ) : result . add ( test_context ) return QueryResult ( result ) | Return which set of test contexts intersect a set of code regions . |
7,181 | def add_child ( self , child ) : if isinstance ( child , BaseCitation ) : self . _children . append ( child ) | Adds a child to the CitationSet |
7,182 | def depth ( self ) -> int : if len ( self . children ) : return 1 + max ( [ child . depth for child in self . children ] ) else : return 1 | Depth of the citation scheme |
7,183 | def set_link ( self , prop , value ) : if not isinstance ( value , URIRef ) : value = URIRef ( value ) self . metadata . add ( prop , value ) | Set given link in CTS Namespace |
7,184 | def editions ( self ) : return [ item for urn , item in self . parent . children . items ( ) if isinstance ( item , CtsEditionMetadata ) ] | Get all editions of the texts |
7,185 | def get_description ( self , lang = None ) : return self . metadata . get_single ( key = RDF_NAMESPACES . CTS . description , lang = lang ) | Get the DC description of the object |
7,186 | def lang ( self ) : return str ( self . graph . value ( self . asNode ( ) , DC . language ) ) | Languages this text is in |
7,187 | def lang ( self , lang ) : self . graph . set ( ( self . asNode ( ) , DC . language , Literal ( lang ) ) ) | Language this text is available in |
7,188 | def update ( self , other ) : if not isinstance ( other , CtsWorkMetadata ) : raise TypeError ( "Cannot add %s to CtsWorkMetadata" % type ( other ) ) elif self . urn != other . urn : raise InvalidURN ( "Cannot add CtsWorkMetadata %s to CtsWorkMetadata %s " % ( self . urn , other . urn ) ) for urn , text in other . children . items ( ) : self . texts [ urn ] = text self . texts [ urn ] . parent = self self . texts [ urn ] . resource = None return self | Merge two XmlCtsWorkMetadata Objects . |
7,189 | def get_translation_in ( self , key = None ) : if key is not None : return [ item for item in self . texts . values ( ) if isinstance ( item , CtsTranslationMetadata ) and item . lang == key ] else : return [ item for item in self . texts . values ( ) if isinstance ( item , CtsTranslationMetadata ) ] | Find a translation with given language |
7,190 | def update ( self , other ) : if not isinstance ( other , CtsTextgroupMetadata ) : raise TypeError ( "Cannot add %s to CtsTextgroupMetadata" % type ( other ) ) elif str ( self . urn ) != str ( other . urn ) : raise InvalidURN ( "Cannot add CtsTextgroupMetadata %s to CtsTextgroupMetadata %s " % ( self . urn , other . urn ) ) for urn , work in other . works . items ( ) : if urn in self . works : self . works [ urn ] . update ( deepcopy ( work ) ) else : self . works [ urn ] = deepcopy ( work ) self . works [ urn ] . parent = self self . works [ urn ] . resource = None return self | Merge two Textgroup Objects . |
7,191 | def get ( self , tags = [ ] , trigger_ids = [ ] ) : params = { } if len ( tags ) > 0 : params [ 'tags' ] = ',' . join ( tags ) if len ( trigger_ids ) > 0 : params [ 'triggerIds' ] = ',' . join ( trigger_ids ) url = self . _service_url ( 'triggers' , params = params ) triggers_dict = self . _get ( url ) return Trigger . list_to_object_list ( triggers_dict ) | Get triggers with optional filtering . Querying without parameters returns all the trigger definitions . |
7,192 | def create ( self , trigger ) : data = self . _serialize_object ( trigger ) if isinstance ( trigger , FullTrigger ) : returned_dict = self . _post ( self . _service_url ( [ 'triggers' , 'trigger' ] ) , data ) return FullTrigger ( returned_dict ) else : returned_dict = self . _post ( self . _service_url ( 'triggers' ) , data ) return Trigger ( returned_dict ) | Create a new trigger . |
7,193 | def update ( self , trigger_id , full_trigger ) : data = self . _serialize_object ( full_trigger ) rdict = self . _put ( self . _service_url ( [ 'triggers' , 'trigger' , trigger_id ] ) , data ) return FullTrigger ( rdict ) | Update an existing full trigger . |
7,194 | def create_group ( self , trigger ) : data = self . _serialize_object ( trigger ) return Trigger ( self . _post ( self . _service_url ( [ 'triggers' , 'groups' ] ) , data ) ) | Create a new group trigger . |
7,195 | def group_members ( self , group_id , include_orphans = False ) : params = { 'includeOrphans' : str ( include_orphans ) . lower ( ) } url = self . _service_url ( [ 'triggers' , 'groups' , group_id , 'members' ] , params = params ) return Trigger . list_to_object_list ( self . _get ( url ) ) | Find all group member trigger definitions |
7,196 | def update_group ( self , group_id , trigger ) : data = self . _serialize_object ( trigger ) self . _put ( self . _service_url ( [ 'triggers' , 'groups' , group_id ] ) , data , parse_json = False ) | Update an existing group trigger definition and its member definitions . |
7,197 | def delete_group ( self , group_id , keep_non_orphans = False , keep_orphans = False ) : params = { 'keepNonOrphans' : str ( keep_non_orphans ) . lower ( ) , 'keepOrphans' : str ( keep_orphans ) . lower ( ) } self . _delete ( self . _service_url ( [ 'triggers' , 'groups' , group_id ] , params = params ) ) | Delete a group trigger |
7,198 | def create_group_member ( self , member ) : data = self . _serialize_object ( member ) return Trigger ( self . _post ( self . _service_url ( [ 'triggers' , 'groups' , 'members' ] ) , data ) ) | Create a new member trigger for a parent trigger . |
7,199 | def set_group_conditions ( self , group_id , conditions , trigger_mode = None ) : data = self . _serialize_object ( conditions ) if trigger_mode is not None : url = self . _service_url ( [ 'triggers' , 'groups' , group_id , 'conditions' , trigger_mode ] ) else : url = self . _service_url ( [ 'triggers' , 'groups' , group_id , 'conditions' ] ) response = self . _put ( url , data ) return Condition . list_to_object_list ( response ) | Set the group conditions . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.